instruction
stringclasses 1
value | output
stringlengths 64
69.4k
| input
stringlengths 205
32.4k
|
---|---|---|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
static public void loadRowVectorFromFile(String file, int columns, double[] row)
throws JMetalException {
try {
BufferedReader brSrc =
new BufferedReader(
new InputStreamReader(new FileInputStream(ClassLoader.getSystemResource(file).getPath()))) ;
//BufferedReader brSrc = new BufferedReader(new FileReader(file));
loadRowVector(brSrc, columns, row);
brSrc.close();
} catch (Exception e) {
JMetalLogger.logger.log(Level.SEVERE, "Error in Benchmark.java", e);
throw new JMetalException("Error in Benchmark.java");
}
}
|
#vulnerable code
static public void loadRowVectorFromFile(String file, int columns, double[] row)
throws JMetalException {
try {
BufferedReader brSrc = new BufferedReader(new FileReader(file));
loadRowVector(brSrc, columns, row);
brSrc.close();
} catch (Exception e) {
JMetalLogger.logger.log(Level.SEVERE, "Error in Benchmark.java", e);
throw new JMetalException("Error in Benchmark.java");
}
}
#location 7
#vulnerability type RESOURCE_LEAK
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
void initIdealPoint() throws JMException, ClassNotFoundException {
for (int i = 0; i < problem_.getNumberOfObjectives(); i++) {
z_[i] = 1.0e+30;
indArray_[i] = new Solution(problem_);
problem_.evaluate(indArray_[i]);
evaluations_++;
} // for
for (int i = 0; i < populationSize_; i++) {
updateReference(population_.get(i));
} // for
}
|
#vulnerable code
void updateOfSolutions(Solution indiv, int id, int type) {
// indiv: child solution
// id: the id of current subproblem
// type: update solutions in - neighborhood (1) or whole population (otherwise)
int size;
int time;
time = 0;
if (type == 1) {
size = parentThread_.neighborhood_[id].length;
} else {
size = parentThread_.population_.size();
}
int[] perm = new int[size];
Utils.randomPermutation(perm, size);
for (int i = 0; i < size; i++) {
int k;
if (type == 1) {
k = parentThread_.neighborhood_[id][perm[i]];
} else {
k = perm[i]; // calculate the values of objective function regarding the current subproblem
}
double f1, f2;
f2 = fitnessFunction(indiv, parentThread_.lambda_[k]);
synchronized (parentThread_) {
f1 = fitnessFunction(parentThread_.population_.get(k), parentThread_.lambda_[k]);
if (f2 < f1) {
parentThread_.population_.replace(k, new Solution(indiv));
//population[k].indiv = indiv;
time++;
}
}
// the maximal number of solutions updated is not allowed to exceed 'limit'
if (time >= parentThread_.nr_) {
return;
}
}
}
#location 17
#vulnerability type THREAD_SAFETY_VIOLATION
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
@Override
public int compare(Solution o1, Solution o2) {
if (o1 == null) {
return 1;
} else if (o2 == null) {
return -1;
}
int flagComparatorRank = RANK_COMPARATOR.compare(o1, o2);
if (flagComparatorRank != 0) {
return flagComparatorRank;
}
/* His rank is equal, then distance crowding RANK_COMPARATOR */
double distance1 = (double)o1.getAlgorithmAttributes().getAttribute("CrowdingDistance");
double distance2 = (double)o2.getAlgorithmAttributes().getAttribute("CrowdingDistance");
if (distance1 > distance2) {
return -1;
}
if (distance1 < distance2) {
return 1;
}
return 0;
}
|
#vulnerable code
@Override
public int compare(Solution o1, Solution o2) {
if (o1 == null) {
return 1;
} else if (o2 == null) {
return -1;
}
int flagComparatorRank = RANK_COMPARATOR.compare(o1, o2);
if (flagComparatorRank != 0) {
return flagComparatorRank;
}
/* His rank is equal, then distance crowding RANK_COMPARATOR */
double distance1 = (double)o1.getAttributes().getAttribute("CrowdingDistance");
double distance2 = (double)o2.getAttributes().getAttribute("CrowdingDistance");
if (distance1 > distance2) {
return -1;
}
if (distance1 < distance2) {
return 1;
}
return 0;
}
#location 15
#vulnerability type NULL_DEREFERENCE
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
@Override
protected void doGet(HttpServletRequest req, HttpServletResponse resp)
throws ServletException, IOException {
String token = req.getHeader("Authorization");
User user = Authentication.getInstance().getUsername(token);
if(user == null) {
resp.sendError(401);
return;
}
JSONObject json_resp = new JSONObject();
json_resp.put("user", user.getUsername());
json_resp.put("admin", user.isAdmin());
JSONArray rolesObj = new JSONArray();
for (Role r : user.getRoles()) {
rolesObj.add(r.getName());
}
json_resp.put("roles", rolesObj);
//Set response content type
resp.setContentType("application/json");
//Write response
json_resp.write(resp.getWriter());
}
|
#vulnerable code
@Override
protected void doGet(HttpServletRequest req, HttpServletResponse resp)
throws ServletException, IOException {
//resp.addHeader("Access-Control-Allow-Origin", "*");
HttpSession session = req.getSession(false);
LoggedIn mLoggedIn = Session.getUserLoggedIn(session);
if(mLoggedIn == null){
resp.sendError(401);
return;
}
JSONObject json_resp = new JSONObject();
json_resp.put("user", mLoggedIn.getUserName());
json_resp.put("admin", mLoggedIn.isAdmin());
User u = UsersStruct.getInstance().getUser(mLoggedIn.getUserName());
JSONArray rolesObj = new JSONArray();
for (Role r : u.getRoles())
{
rolesObj.add(r.getName());
}
json_resp.put("roles", rolesObj);
//Set response content type
resp.setContentType("application/json");
//Write response
json_resp.write(resp.getWriter());
}
#location 18
#vulnerability type NULL_DEREFERENCE
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
@Override
protected void doPost(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException {
//resp.addHeader("Access-Control-Allow-Origin", "*");
//Try login
// Does not require admini rights.
LoggedIn mLoggedIn = Session.webappLogin(req, resp, false).getLogin();
//servletLogin(req, resp, true);//auth.login(user, pass);
if (mLoggedIn == null) {
resp.sendError(401, "Login failed");
return;
}
JSONObject json_resp = new JSONObject();
json_resp.put("user", mLoggedIn.getUserName());
json_resp.put("admin", mLoggedIn.isAdmin());
User u = UsersStruct.getInstance().getUser(mLoggedIn.getUserName());
JSONArray rolesObj = new JSONArray();
if (u!=null&&u.getRoles()!=null) {
for (Role r : u.getRoles()) {
if (r!=null)
rolesObj.add(r.getName());
}
json_resp.put("roles", rolesObj);
}
json_resp.put("token", mLoggedIn.getToken());
//Set response content type
resp.setContentType("application/json");
//Write response
json_resp.write(resp.getWriter());
}
|
#vulnerable code
@Override
protected void doPost(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException {
//resp.addHeader("Access-Control-Allow-Origin", "*");
//Try login
// Does not require admini rights.
LoggedIn mLoggedIn = Session.webappLogin(req, resp, false).getLogin();
//servletLogin(req, resp, true);//auth.login(user, pass);
if (mLoggedIn == null) {
resp.sendError(401, "Login failed");
return;
}
JSONObject json_resp = new JSONObject();
json_resp.put("user", mLoggedIn.getUserName());
json_resp.put("admin", mLoggedIn.isAdmin());
User u = UsersStruct.getInstance().getUser(mLoggedIn.getUserName());
JSONArray rolesObj = new JSONArray();
for (Role r : u.getRoles())
{
rolesObj.add(r.getName());
}
json_resp.put("roles", rolesObj);
json_resp.put("token", mLoggedIn.getToken());
//Set response content type
resp.setContentType("application/json");
//Write response
json_resp.write(resp.getWriter());
}
#location 19
#vulnerability type NULL_DEREFERENCE
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
public JSONResource json(URI anUri) throws IOException {
return doGET(anUri, new JSONResource());
}
|
#vulnerable code
public JSONResource json(URI anUri) throws IOException, MalformedURLException {
JSONObject json = JSONObject.class.cast(anUri.toURL().openConnection().getContent());
return new JSONResource(json);
}
#location 2
#vulnerability type RESOURCE_LEAK
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
public JSONResource json(URI anUri) throws IOException {
return doGET(anUri, new JSONResource());
}
|
#vulnerable code
public JSONResource json(URI anUri) throws IOException, MalformedURLException {
JSONObject json = JSONObject.class.cast(anUri.toURL().openConnection().getContent());
return new JSONResource(json);
}
#location 2
#vulnerability type RESOURCE_LEAK
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
@Override
public HWDiskStore[] getDisks() {
List<HWDiskStore> result;
result = new ArrayList<>();
readMap.clear();
writeMap.clear();
populateReadWriteMaps();
Map<String, List<Object>> vals = WmiUtil.selectObjectsFrom(null, "Win32_DiskDrive",
"Name,Manufacturer,Model,SerialNumber,Size,Index", null, DRIVE_TYPES);
for (int i = 0; i < vals.get("Name").size(); i++) {
HWDiskStore ds = new HWDiskStore();
ds.setName((String) vals.get("Name").get(i));
ds.setModel(String.format("%s %s", vals.get("Model").get(i), vals.get("Manufacturer").get(i)).trim());
// Most vendors store serial # as a hex string; convert
ds.setSerial(ParseUtil.hexStringToString((String) vals.get("SerialNumber").get(i)));
String index = vals.get("Index").get(i).toString();
if (readMap.containsKey(index)) {
ds.setReads(readMap.get(index));
}
if (writeMap.containsKey(index)) {
ds.setWrites(writeMap.get(index));
}
// If successful this line is the desired value
try {
ds.setSize(Long.parseLong((String) vals.get("Size").get(i)));
} catch (NumberFormatException e) {
// If we failed to parse, give up
// This is expected for an empty string on some drives
ds.setSize(0L);
}
result.add(ds);
}
return result.toArray(new HWDiskStore[result.size()]);
}
|
#vulnerable code
@Override
public HWDiskStore[] getDisks() {
List<HWDiskStore> result;
result = new ArrayList<>();
Map<String, List<String>> vals = WmiUtil.selectStringsFrom(null, "Win32_DiskDrive",
"Name,Manufacturer,Model,SerialNumber,Size", null);
for (int i = 0; i < vals.get("Name").size(); i++) {
HWDiskStore ds = new HWDiskStore();
ds.setName(vals.get("Name").get(i));
ds.setModel(String.format("%s %s", vals.get("Model").get(i), vals.get("Manufacturer").get(i)).trim());
// Most vendors store serial # as a hex string; convert
ds.setSerial(ParseUtil.hexStringToString(vals.get("SerialNumber").get(i)));
// If successful this line is the desired value
try {
ds.setSize(Long.parseLong(vals.get("Size").get(i)));
} catch (NumberFormatException e) {
// If we failed to parse, give up
// This is expected for an empty string on some drives
ds.setSize(0L);
}
result.add(ds);
}
return result.toArray(new HWDiskStore[result.size()]);
}
#location 11
#vulnerability type NULL_DEREFERENCE
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
public static Map<Integer, PerfCounterBlock> buildProcessMapFromPerfCounters(Collection<Integer> pids) {
Map<Integer, PerfCounterBlock> processMap = new HashMap<>();
Pair<List<String>, Map<ProcessPerformanceProperty, List<Long>>> instanceValues = ProcessInformation
.queryProcessCounters();
long now = System.currentTimeMillis(); // 1970 epoch
List<String> instances = instanceValues.getA();
Map<ProcessPerformanceProperty, List<Long>> valueMap = instanceValues.getB();
List<Long> pidList = valueMap.get(ProcessPerformanceProperty.PROCESSID);
List<Long> ppidList = valueMap.get(ProcessPerformanceProperty.PARENTPROCESSID);
List<Long> priorityList = valueMap.get(ProcessPerformanceProperty.PRIORITY);
List<Long> ioReadList = valueMap.get(ProcessPerformanceProperty.READTRANSFERCOUNT);
List<Long> ioWriteList = valueMap.get(ProcessPerformanceProperty.WRITETRANSFERCOUNT);
List<Long> workingSetSizeList = valueMap.get(ProcessPerformanceProperty.PRIVATEPAGECOUNT);
List<Long> creationTimeList = valueMap.get(ProcessPerformanceProperty.CREATIONDATE);
List<Long> pageFaultsList = valueMap.get(ProcessPerformanceProperty.PAGEFAULTSPERSEC);
for (int inst = 0; inst < instances.size(); inst++) {
int pid = pidList.get(inst).intValue();
if (pids == null || pids.contains(pid)) {
// if creation time value is less than current millis, it's in 1970 epoch,
// otherwise it's 1601 epoch and we must convert
long ctime = creationTimeList.get(inst);
if (ctime > now) {
ctime = WinBase.FILETIME.filetimeToDate((int) (ctime >> 32), (int) (ctime & 0xffffffffL)).getTime();
}
processMap.put(pid,
new PerfCounterBlock(instances.get(inst), ppidList.get(inst).intValue(),
priorityList.get(inst).intValue(), workingSetSizeList.get(inst), ctime, now - ctime,
ioReadList.get(inst), ioWriteList.get(inst), pageFaultsList.get(inst).intValue()));
}
}
return processMap;
}
|
#vulnerable code
public static Map<Integer, PerfCounterBlock> buildProcessMapFromPerfCounters(Collection<Integer> pids) {
Map<Integer, PerfCounterBlock> processMap = new HashMap<>();
Pair<List<String>, Map<ProcessPerformanceProperty, List<Long>>> instanceValues = ProcessInformation
.queryProcessCounters();
long now = System.currentTimeMillis(); // 1970 epoch
List<String> instances = instanceValues.getA();
Map<ProcessPerformanceProperty, List<Long>> valueMap = instanceValues.getB();
List<Long> pidList = valueMap.get(ProcessPerformanceProperty.PROCESSID);
List<Long> ppidList = valueMap.get(ProcessPerformanceProperty.PARENTPROCESSID);
List<Long> priorityList = valueMap.get(ProcessPerformanceProperty.PRIORITY);
List<Long> ioReadList = valueMap.get(ProcessPerformanceProperty.READTRANSFERCOUNT);
List<Long> ioWriteList = valueMap.get(ProcessPerformanceProperty.WRITETRANSFERCOUNT);
List<Long> workingSetSizeList = valueMap.get(ProcessPerformanceProperty.PRIVATEPAGECOUNT);
List<Long> creationTimeList = valueMap.get(ProcessPerformanceProperty.CREATIONDATE);
for (int inst = 0; inst < instances.size(); inst++) {
int pid = pidList.get(inst).intValue();
if (pids == null || pids.contains(pid)) {
// if creation time value is less than current millis, it's in 1970 epoch,
// otherwise it's 1601 epoch and we must convert
long ctime = creationTimeList.get(inst);
if (ctime > now) {
ctime = WinBase.FILETIME.filetimeToDate((int) (ctime >> 32), (int) (ctime & 0xffffffffL)).getTime();
}
processMap.put(pid,
new PerfCounterBlock(instances.get(inst), ppidList.get(inst).intValue(),
priorityList.get(inst).intValue(), workingSetSizeList.get(inst), ctime, now - ctime,
ioReadList.get(inst), ioWriteList.get(inst)));
}
}
return processMap;
}
#location 4
#vulnerability type INTERFACE_NOT_THREAD_SAFE
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
private void populatePartitionMaps() {
driveToPartitionMap.clear();
partitionToLogicalDriveMap.clear();
partitionMap.clear();
// For Regexp matching DeviceIDs
Matcher mAnt;
Matcher mDep;
// Map drives to partitions
Map<String, List<Object>> partitionQueryMap = WmiUtil.selectObjectsFrom(null, "Win32_DiskDriveToDiskPartition",
DISK_TO_PARTITION_STRINGS, null, DISK_TO_PARTITION_TYPES);
for (int i = 0; i < partitionQueryMap.get(WmiProperty.ANTECEDENT.name()).size(); i++) {
mAnt = DEVICE_ID.matcher((String) partitionQueryMap.get(WmiProperty.ANTECEDENT.name()).get(i));
mDep = DEVICE_ID.matcher((String) partitionQueryMap.get(WmiProperty.DEPENDENT.name()).get(i));
if (mAnt.matches() && mDep.matches()) {
MapUtil.createNewListIfAbsent(driveToPartitionMap, mAnt.group(1).replaceAll("\\\\\\\\", "\\\\"))
.add(mDep.group(1));
}
}
// Map partitions to logical disks
partitionQueryMap = WmiUtil.selectObjectsFrom(null, "Win32_LogicalDiskToPartition",
DISK_TO_PARTITION_STRINGS, null, DISK_TO_PARTITION_TYPES);
for (int i = 0; i < partitionQueryMap.get(WmiProperty.ANTECEDENT.name()).size(); i++) {
mAnt = DEVICE_ID.matcher((String) partitionQueryMap.get(WmiProperty.ANTECEDENT.name()).get(i));
mDep = DEVICE_ID.matcher((String) partitionQueryMap.get(WmiProperty.DEPENDENT.name()).get(i));
if (mAnt.matches() && mDep.matches()) {
partitionToLogicalDriveMap.put(mAnt.group(1), mDep.group(1) + "\\");
}
}
// Next, get all partitions and create objects
final Map<String, List<Object>> hwPartitionQueryMap = WmiUtil.selectObjectsFrom(null, "Win32_DiskPartition",
PARTITION_STRINGS, null, PARTITION_TYPES);
for (int i = 0; i < hwPartitionQueryMap.get(WmiProperty.NAME.name()).size(); i++) {
String deviceID = (String) hwPartitionQueryMap.get(WmiProperty.DEVICEID.name()).get(i);
String logicalDrive = MapUtil.getOrDefault(partitionToLogicalDriveMap, deviceID, "");
String uuid = "";
if (!logicalDrive.isEmpty()) {
// Get matching volume for UUID
char[] volumeChr = new char[BUFSIZE];
Kernel32.INSTANCE.GetVolumeNameForVolumeMountPoint(logicalDrive, volumeChr, BUFSIZE);
uuid = ParseUtil.parseUuidOrDefault(new String(volumeChr).trim(), "");
}
partitionMap
.put(deviceID,
new HWPartition(
(String) hwPartitionQueryMap
.get(WmiProperty.NAME.name()).get(
i),
(String) hwPartitionQueryMap.get(WmiProperty.TYPE.name()).get(i),
(String) hwPartitionQueryMap.get(WmiProperty.DESCRIPTION.name()).get(i), uuid,
(Long) hwPartitionQueryMap.get(WmiProperty.SIZE.name()).get(i),
((Long) hwPartitionQueryMap.get(WmiProperty.DISKINDEX.name()).get(i)).intValue(),
((Long) hwPartitionQueryMap.get(WmiProperty.INDEX.name()).get(i)).intValue(),
logicalDrive));
}
}
|
#vulnerable code
private void populatePartitionMaps() {
driveToPartitionMap.clear();
partitionToLogicalDriveMap.clear();
partitionMap.clear();
// For Regexp matching DeviceIDs
Matcher mAnt;
Matcher mDep;
// Map drives to partitions
Map<String, List<String>> partitionQueryMap = WmiUtil.selectStringsFrom(null, "Win32_DiskDriveToDiskPartition",
DRIVE_TO_PARTITION_PROPERTIES, null);
for (int i = 0; i < partitionQueryMap.get(ANTECEDENT_PROPERTY).size(); i++) {
mAnt = DEVICE_ID.matcher(partitionQueryMap.get(ANTECEDENT_PROPERTY).get(i));
mDep = DEVICE_ID.matcher(partitionQueryMap.get(DEPENDENT_PROPERTY).get(i));
if (mAnt.matches() && mDep.matches()) {
MapUtil.createNewListIfAbsent(driveToPartitionMap, mAnt.group(1).replaceAll("\\\\\\\\", "\\\\"))
.add(mDep.group(1));
}
}
// Map partitions to logical disks
partitionQueryMap = WmiUtil.selectStringsFrom(null, "Win32_LogicalDiskToPartition",
LOGICAL_DISK_TO_PARTITION_PROPERTIES, null);
for (int i = 0; i < partitionQueryMap.get(ANTECEDENT_PROPERTY).size(); i++) {
mAnt = DEVICE_ID.matcher(partitionQueryMap.get(ANTECEDENT_PROPERTY).get(i));
mDep = DEVICE_ID.matcher(partitionQueryMap.get(DEPENDENT_PROPERTY).get(i));
if (mAnt.matches() && mDep.matches()) {
partitionToLogicalDriveMap.put(mAnt.group(1), mDep.group(1) + "\\");
}
}
// Next, get all partitions and create objects
final Map<String, List<Object>> hwPartitionQueryMap = WmiUtil.selectObjectsFrom(null, "Win32_DiskPartition",
PARTITION_PROPERTIES, null, PARTITION_TYPES);
for (int i = 0; i < hwPartitionQueryMap.get(NAME_PROPERTY).size(); i++) {
String deviceID = (String) hwPartitionQueryMap.get(DEVICE_ID_PROPERTY).get(i);
String logicalDrive = MapUtil.getOrDefault(partitionToLogicalDriveMap, deviceID, "");
String uuid = "";
if (!logicalDrive.isEmpty()) {
// Get matching volume for UUID
char[] volumeChr = new char[BUFSIZE];
Kernel32.INSTANCE.GetVolumeNameForVolumeMountPoint(logicalDrive, volumeChr, BUFSIZE);
uuid = ParseUtil.parseUuidOrDefault(new String(volumeChr).trim(), "");
}
partitionMap.put(deviceID,
new HWPartition((String) hwPartitionQueryMap.get(NAME_PROPERTY).get(i),
(String) hwPartitionQueryMap.get(TYPE_PROPERTY).get(i),
(String) hwPartitionQueryMap.get(DESCRIPTION_PROPERTY).get(i), uuid,
ParseUtil.parseLongOrDefault((String) hwPartitionQueryMap.get(SIZE_PROPERTY).get(i), 0L),
((Long) hwPartitionQueryMap.get(DISK_INDEX_PROPERTY).get(i)).intValue(),
((Long) hwPartitionQueryMap.get(INDEX_PROPERTY).get(i)).intValue(), logicalDrive));
}
}
#location 12
#vulnerability type NULL_DEREFERENCE
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
public float getProcessorLoad() {
Scanner in = null;
try {
in = new Scanner(new FileReader("/proc/stat"));
} catch (FileNotFoundException e) {
System.err.println("Problem with: /proc/stat");
System.err.println(e.getMessage());
return -1;
}
in.useDelimiter("\n");
String[] result = in.next().split(" ");
ArrayList<Float> loads = new ArrayList<Float>();
for (String load : result) {
if (load.matches("-?\\d+(\\.\\d+)?")) {
loads.add(Float.valueOf(load));
}
}
// ((Total-PrevTotal)-(Idle-PrevIdle))/(Total-PrevTotal) - see http://stackoverflow.com/a/23376195/4359897
float totalCpuLoad = (loads.get(0) + loads.get(2))*100 / (loads.get(0) + loads.get(2) + loads.get(3));
return FormatUtil.round(totalCpuLoad, 2);
}
|
#vulnerable code
public float getProcessorLoad() {
// should be same as on Mac. Not tested.
ArrayList<String> topResult = ExecutingCommand.runNative("top -l 1 -R -F -n1"); // cpu load is in [3]
String[] idle = topResult.get(3).split(" "); // idle value is in [6]
return 100 - Float.valueOf(idle[6].replace("%", ""));
}
#location 5
#vulnerability type NULL_DEREFERENCE
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
public static <T extends Enum<T>> Map<T, Long> queryValues(Class<T> propertyEnum, String perfObject,
String perfWmiClass) {
// Check without locking for performance
if (!failedQueryCache.contains(perfObject)) {
failedQueryCacheLock.lock();
try {
// Double check lock
if (!failedQueryCache.contains(perfObject)) {
Map<T, Long> valueMap = queryValuesFromPDH(propertyEnum, perfObject);
if (!valueMap.isEmpty()) {
return valueMap;
}
// If we are here, query failed
LOG.warn("Disabling further attempts to query {}.", perfObject);
failedQueryCache.add(perfObject);
}
} finally {
failedQueryCacheLock.unlock();
}
}
return queryValuesFromWMI(propertyEnum, perfWmiClass);
}
|
#vulnerable code
public static <T extends Enum<T>> Map<T, Long> queryValues(Class<T> propertyEnum, String perfObject,
String perfWmiClass) {
Map<T, Long> valueMap = queryValuesFromPDH(propertyEnum, perfObject);
if (valueMap.isEmpty()) {
return queryValuesFromWMI(propertyEnum, perfWmiClass);
}
return valueMap;
}
#location 3
#vulnerability type INTERFACE_NOT_THREAD_SAFE
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
public static ArrayList<String> runNative(String cmdToRun) {
Process p = null;
try {
p = Runtime.getRuntime().exec(cmdToRun);
//p.waitFor();
} catch (IOException e) {
return null;
}
BufferedReader reader = new BufferedReader(new InputStreamReader(
p.getInputStream()));
String line = "";
ArrayList<String> sa = new ArrayList<String>();
try {
while ((line = reader.readLine()) != null) {
sa.add(line);
}
} catch (IOException e) {
return null;
}
p.destroy();
return sa;
}
|
#vulnerable code
public static ArrayList<String> runNative(String cmdToRun) {
Process p = null;
try {
p = Runtime.getRuntime().exec(cmdToRun);
p.waitFor();
} catch (IOException e) {
return null;
} catch (InterruptedException e) {
return null;
}
BufferedReader reader = new BufferedReader(new InputStreamReader(
p.getInputStream()));
String line = "";
ArrayList<String> sa = new ArrayList<String>();
try {
while ((line = reader.readLine()) != null) {
sa.add(line);
}
} catch (IOException e) {
return null;
}
return sa;
}
#location 13
#vulnerability type RESOURCE_LEAK
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
@Override
public double getCpuVoltage() {
// Initialize
double volts = 0d;
// If we couldn't get through normal WMI go directly to OHM
if (this.voltIdentifierStr != null) {
double[] vals = wmiGetValuesForKeys("/namespace:\\\\root\\OpenHardwareMonitor PATH Sensor",
this.voltIdentifierStr, "Voltage", "Identifier,SensorType,Value");
if (vals.length > 0) {
// Return the first voltage reading
volts = vals[0];
}
return volts;
}
// This branch is used the first time and all subsequent times if
// successful (voltIdenifierStr == null)
// Try to get value
// Try to get value using initial or updated successful values
int decivolts = 0;
if (this.wmiVoltPath == null) {
this.wmiVoltPath = "CPU";
this.wmiVoltProperty = "CurrentVoltage";
decivolts = wmiGetValue(this.wmiVoltPath, this.wmiVoltProperty);
if (decivolts < 0) {
this.wmiVoltPath = "/namespace:\\\\root\\cimv2 PATH Win32_Processor";
decivolts = wmiGetValue(this.wmiVoltPath, this.wmiVoltProperty);
}
// If the eighth bit is set, bits 0-6 contain the voltage
// multiplied by 10. If the eighth bit is not set, then the bit
// setting in VoltageCaps represents the voltage value.
if ((decivolts & 0x80) == 0 && decivolts > 0) {
this.wmiVoltProperty = "VoltageCaps";
// really a bit setting, not decivolts, test later
decivolts = wmiGetValue(this.wmiVoltPath, this.wmiVoltProperty);
}
} else {
// We've successfully read a previous time, or failed both here and
// with OHM
decivolts = wmiGetValue(this.wmiVoltPath, this.wmiVoltProperty);
}
// Convert dV to V and return result
if (decivolts > 0) {
if (this.wmiVoltProperty.equals("VoltageCaps")) {
// decivolts are bits
if ((decivolts & 0x1) > 0) {
volts = 5.0;
} else if ((decivolts & 0x2) > 0) {
volts = 3.3;
} else if ((decivolts & 0x4) > 0) {
volts = 2.9;
}
} else {
// Value from bits 0-6
volts = (decivolts & 0x7F) / 10d;
}
}
if (volts <= 0d) {
// Unable to get voltage via WMI. Future attempts will be
// attempted via Open Hardware Monitor WMI if successful
String[] voltIdentifiers = wmiGetStrValuesForKey("/namespace:\\\\root\\OpenHardwareMonitor PATH Hardware",
"Voltage", "SensorType,Identifier");
// Look for identifier containing "cpu"
for (String id : voltIdentifiers) {
if (id.toLowerCase().contains("cpu")) {
this.voltIdentifierStr = id;
break;
}
}
// If none contain cpu just grab the first one
if (voltIdentifiers.length > 0) {
this.voltIdentifierStr = voltIdentifiers[0];
}
// If not null, recurse and get value via OHM
if (this.voltIdentifierStr != null) {
return getCpuVoltage();
}
}
return volts;
}
|
#vulnerable code
@Override
public double getCpuVoltage() {
ArrayList<String> hwInfo = ExecutingCommand.runNative("wmic cpu get CurrentVoltage");
for (String checkLine : hwInfo) {
if (checkLine.length() == 0 || checkLine.toLowerCase().contains("currentvoltage")) {
continue;
} else {
// If successful this line is in tenths of volts
try {
int decivolts = Integer.parseInt(checkLine.trim());
if (decivolts > 0) {
return decivolts / 10d;
}
} catch (NumberFormatException e) {
// If we failed to parse, give up
}
break;
}
}
// Above query failed, try something else
hwInfo = ExecutingCommand.runNative("wmic /namespace:\\\\root\\cimv2 PATH Win32_Processor get CurrentVoltage");
for (String checkLine : hwInfo) {
if (checkLine.length() == 0 || checkLine.toLowerCase().contains("currentreading")) {
continue;
} else {
// If successful:
// If the eighth bit is set, bits 0-6 contain the voltage
// multiplied by 10. If the eighth bit is not set, then the bit
// setting in VoltageCaps represents the voltage value.
try {
int decivolts = Integer.parseInt(checkLine.trim());
// Check if 8th bit (of 16 bit number) is set
if ((decivolts & 0x80) > 0 && decivolts > 0) {
return decivolts / 10d;
}
} catch (NumberFormatException e) {
// If we failed to parse, give up
}
break;
}
}
// Above query failed, try something else
hwInfo = ExecutingCommand.runNative("wmic /namespace:\\\\root\\cimv2 PATH Win32_Processor get VoltageCaps");
for (String checkLine : hwInfo) {
if (checkLine.length() == 0 || checkLine.toLowerCase().contains("currentreading")) {
continue;
} else {
// If successful:
// Bits 0-3 of the field represent specific voltages that the
// processor socket can accept.
try {
int voltagebits = Integer.parseInt(checkLine.trim());
// Return appropriate voltage
if ((voltagebits & 0x1) > 0) {
return 5.0;
} else if ((voltagebits & 0x2) > 0) {
return 3.3;
} else if ((voltagebits & 0x4) > 0) {
return 2.9;
}
} catch (NumberFormatException e) {
// If we failed to parse, give up
}
break;
}
}
return 0d;
}
#location 4
#vulnerability type NULL_DEREFERENCE
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
private void populateReadWriteMaps() {
// Although the field names say "PerSec" this is the Raw Data from which
// the associated fields are populated in the Formatted Data class, so
// in fact this is the data we want
Map<String, List<Object>> vals = WmiUtil.selectObjectsFrom(null, "Win32_PerfRawData_PerfDisk_PhysicalDisk",
"Name,DiskReadsPerSec,DiskReadBytesPerSec,DiskWritesPerSec,DiskWriteBytesPerSec,PercentDiskTime", null,
READ_WRITE_TYPES);
for (int i = 0; i < vals.get("Name").size(); i++) {
String index = ((String) vals.get("Name").get(i)).split("\\s+")[0];
readMap.put(index, (long) vals.get("DiskReadsPerSec").get(i));
readByteMap.put(index, ParseUtil.parseLongOrDefault((String) vals.get("DiskReadBytesPerSec").get(i), 0L));
writeMap.put(index, (long) vals.get("DiskWritesPerSec").get(i));
writeByteMap.put(index, ParseUtil.parseLongOrDefault((String) vals.get("DiskWriteBytesPerSec").get(i), 0L));
// Units are 100-ns, divide to get ms
xferTimeMap.put(index,
ParseUtil.parseLongOrDefault((String) vals.get("PercentDiskTime").get(i), 0L) / 10000L);
}
}
|
#vulnerable code
private void populateReadWriteMaps() {
// Although the field names say "PerSec" this is the Raw Data from which
// the associated fields are populated in the Formatted Data class, so
// in fact this is the data we want
Map<String, List<String>> vals = WmiUtil.selectStringsFrom(null, "Win32_PerfRawData_PerfDisk_PhysicalDisk",
"Name,DiskReadBytesPerSec,DiskWriteBytesPerSec", null);
for (int i = 0; i < vals.get("Name").size(); i++) {
String index = vals.get("Name").get(i).split("\\s+")[0];
readMap.put(index, ParseUtil.parseLongOrDefault(vals.get("DiskReadBytesPerSec").get(i), 0L));
writeMap.put(index, ParseUtil.parseLongOrDefault(vals.get("DiskWriteBytesPerSec").get(i), 0L));
}
}
#location 10
#vulnerability type NULL_DEREFERENCE
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
@Override
public boolean updateAttributes() {
try {
File ifDir = new File(String.format("/sys/class/net/%s/statistics", getName()));
if (!ifDir.isDirectory()) {
return false;
}
} catch (SecurityException e) {
return false;
}
String ifTypePath = String.format("/sys/class/net/%s/type", getName());
String carrierPath = String.format("/sys/class/net/%s/carrier", getName());
String txBytesPath = String.format("/sys/class/net/%s/statistics/tx_bytes", getName());
String rxBytesPath = String.format("/sys/class/net/%s/statistics/rx_bytes", getName());
String txPacketsPath = String.format("/sys/class/net/%s/statistics/tx_packets", getName());
String rxPacketsPath = String.format("/sys/class/net/%s/statistics/rx_packets", getName());
String txErrorsPath = String.format("/sys/class/net/%s/statistics/tx_errors", getName());
String rxErrorsPath = String.format("/sys/class/net/%s/statistics/rx_errors", getName());
String collisionsPath = String.format("/sys/class/net/%s/statistics/collisions", getName());
String rxDropsPath = String.format("/sys/class/net/%s/statistics/rx_dropped", getName());
String ifSpeed = String.format("/sys/class/net/%s/speed", getName());
this.timeStamp = System.currentTimeMillis();
this.ifType = FileUtil.getIntFromFile(ifTypePath);
this.connectorPresent = FileUtil.getIntFromFile(carrierPath) > 0;
this.bytesSent = FileUtil.getUnsignedLongFromFile(txBytesPath);
this.bytesRecv = FileUtil.getUnsignedLongFromFile(rxBytesPath);
this.packetsSent = FileUtil.getUnsignedLongFromFile(txPacketsPath);
this.packetsRecv = FileUtil.getUnsignedLongFromFile(rxPacketsPath);
this.outErrors = FileUtil.getUnsignedLongFromFile(txErrorsPath);
this.inErrors = FileUtil.getUnsignedLongFromFile(rxErrorsPath);
this.collisions = FileUtil.getUnsignedLongFromFile(collisionsPath);
this.inDrops = FileUtil.getUnsignedLongFromFile(rxDropsPath);
long speedMiB = FileUtil.getUnsignedLongFromFile(ifSpeed);
// speed may be -1 from file.
this.speed = speedMiB < 0 ? 0 : speedMiB << 20;
return true;
}
|
#vulnerable code
@Override
public boolean updateAttributes() {
try {
File ifDir = new File(String.format("/sys/class/net/%s/statistics", getName()));
if (!ifDir.isDirectory()) {
return false;
}
} catch (SecurityException e) {
return false;
}
String ifTypePath = String.format("/sys/class/net/%s/type", getName());
String carrierPath = String.format("/sys/class/net/%s/carrier", getName());
String txBytesPath = String.format("/sys/class/net/%s/statistics/tx_bytes", getName());
String rxBytesPath = String.format("/sys/class/net/%s/statistics/rx_bytes", getName());
String txPacketsPath = String.format("/sys/class/net/%s/statistics/tx_packets", getName());
String rxPacketsPath = String.format("/sys/class/net/%s/statistics/rx_packets", getName());
String txErrorsPath = String.format("/sys/class/net/%s/statistics/tx_errors", getName());
String rxErrorsPath = String.format("/sys/class/net/%s/statistics/rx_errors", getName());
String collisionsPath = String.format("/sys/class/net/%s/statistics/collisions", getName());
String rxDropsPath = String.format("/sys/class/net/%s/statistics/rx_dropped", getName());
String ifSpeed = String.format("/sys/class/net/%s/speed", getName());
this.timeStamp = System.currentTimeMillis();
this.ifType = FileUtil.getIntFromFile(ifTypePath);
this.connectorPresent = FileUtil.getIntFromFile(carrierPath) > 0;
this.bytesSent = FileUtil.getUnsignedLongFromFile(txBytesPath);
this.bytesRecv = FileUtil.getUnsignedLongFromFile(rxBytesPath);
this.packetsSent = FileUtil.getUnsignedLongFromFile(txPacketsPath);
this.packetsRecv = FileUtil.getUnsignedLongFromFile(rxPacketsPath);
this.outErrors = FileUtil.getUnsignedLongFromFile(txErrorsPath);
this.inErrors = FileUtil.getUnsignedLongFromFile(rxErrorsPath);
this.collisions = FileUtil.getUnsignedLongFromFile(collisionsPath);
this.inDrops = FileUtil.getUnsignedLongFromFile(rxDropsPath);
// speed may be negative from file. Convert to MiB.
this.speed = FileUtil.getUnsignedLongFromFile(ifSpeed);
this.speed = this.speed < 0 ? 0 : this.speed << 20;
return true;
}
#location 35
#vulnerability type THREAD_SAFETY_VIOLATION
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
private void populatePartitionMaps() {
driveToPartitionMap.clear();
partitionToLogicalDriveMap.clear();
partitionMap.clear();
// For Regexp matching DeviceIDs
Matcher mAnt;
Matcher mDep;
// Map drives to partitions
Map<String, List<Object>> partitionQueryMap = WmiUtil.selectObjectsFrom(null, "Win32_DiskDriveToDiskPartition",
DISK_TO_PARTITION_STRINGS, null, DISK_TO_PARTITION_TYPES);
for (int i = 0; i < partitionQueryMap.get(WmiProperty.ANTECEDENT.name()).size(); i++) {
mAnt = DEVICE_ID.matcher((String) partitionQueryMap.get(WmiProperty.ANTECEDENT.name()).get(i));
mDep = DEVICE_ID.matcher((String) partitionQueryMap.get(WmiProperty.DEPENDENT.name()).get(i));
if (mAnt.matches() && mDep.matches()) {
MapUtil.createNewListIfAbsent(driveToPartitionMap, mAnt.group(1).replaceAll("\\\\\\\\", "\\\\"))
.add(mDep.group(1));
}
}
// Map partitions to logical disks
partitionQueryMap = WmiUtil.selectObjectsFrom(null, "Win32_LogicalDiskToPartition",
DISK_TO_PARTITION_STRINGS, null, DISK_TO_PARTITION_TYPES);
for (int i = 0; i < partitionQueryMap.get(WmiProperty.ANTECEDENT.name()).size(); i++) {
mAnt = DEVICE_ID.matcher((String) partitionQueryMap.get(WmiProperty.ANTECEDENT.name()).get(i));
mDep = DEVICE_ID.matcher((String) partitionQueryMap.get(WmiProperty.DEPENDENT.name()).get(i));
if (mAnt.matches() && mDep.matches()) {
partitionToLogicalDriveMap.put(mAnt.group(1), mDep.group(1) + "\\");
}
}
// Next, get all partitions and create objects
final Map<String, List<Object>> hwPartitionQueryMap = WmiUtil.selectObjectsFrom(null, "Win32_DiskPartition",
PARTITION_STRINGS, null, PARTITION_TYPES);
for (int i = 0; i < hwPartitionQueryMap.get(WmiProperty.NAME.name()).size(); i++) {
String deviceID = (String) hwPartitionQueryMap.get(WmiProperty.DEVICEID.name()).get(i);
String logicalDrive = MapUtil.getOrDefault(partitionToLogicalDriveMap, deviceID, "");
String uuid = "";
if (!logicalDrive.isEmpty()) {
// Get matching volume for UUID
char[] volumeChr = new char[BUFSIZE];
Kernel32.INSTANCE.GetVolumeNameForVolumeMountPoint(logicalDrive, volumeChr, BUFSIZE);
uuid = ParseUtil.parseUuidOrDefault(new String(volumeChr).trim(), "");
}
partitionMap
.put(deviceID,
new HWPartition(
(String) hwPartitionQueryMap
.get(WmiProperty.NAME.name()).get(
i),
(String) hwPartitionQueryMap.get(WmiProperty.TYPE.name()).get(i),
(String) hwPartitionQueryMap.get(WmiProperty.DESCRIPTION.name()).get(i), uuid,
(Long) hwPartitionQueryMap.get(WmiProperty.SIZE.name()).get(i),
((Long) hwPartitionQueryMap.get(WmiProperty.DISKINDEX.name()).get(i)).intValue(),
((Long) hwPartitionQueryMap.get(WmiProperty.INDEX.name()).get(i)).intValue(),
logicalDrive));
}
}
|
#vulnerable code
private void populatePartitionMaps() {
driveToPartitionMap.clear();
partitionToLogicalDriveMap.clear();
partitionMap.clear();
// For Regexp matching DeviceIDs
Matcher mAnt;
Matcher mDep;
// Map drives to partitions
Map<String, List<String>> partitionQueryMap = WmiUtil.selectStringsFrom(null, "Win32_DiskDriveToDiskPartition",
DRIVE_TO_PARTITION_PROPERTIES, null);
for (int i = 0; i < partitionQueryMap.get(ANTECEDENT_PROPERTY).size(); i++) {
mAnt = DEVICE_ID.matcher(partitionQueryMap.get(ANTECEDENT_PROPERTY).get(i));
mDep = DEVICE_ID.matcher(partitionQueryMap.get(DEPENDENT_PROPERTY).get(i));
if (mAnt.matches() && mDep.matches()) {
MapUtil.createNewListIfAbsent(driveToPartitionMap, mAnt.group(1).replaceAll("\\\\\\\\", "\\\\"))
.add(mDep.group(1));
}
}
// Map partitions to logical disks
partitionQueryMap = WmiUtil.selectStringsFrom(null, "Win32_LogicalDiskToPartition",
LOGICAL_DISK_TO_PARTITION_PROPERTIES, null);
for (int i = 0; i < partitionQueryMap.get(ANTECEDENT_PROPERTY).size(); i++) {
mAnt = DEVICE_ID.matcher(partitionQueryMap.get(ANTECEDENT_PROPERTY).get(i));
mDep = DEVICE_ID.matcher(partitionQueryMap.get(DEPENDENT_PROPERTY).get(i));
if (mAnt.matches() && mDep.matches()) {
partitionToLogicalDriveMap.put(mAnt.group(1), mDep.group(1) + "\\");
}
}
// Next, get all partitions and create objects
final Map<String, List<Object>> hwPartitionQueryMap = WmiUtil.selectObjectsFrom(null, "Win32_DiskPartition",
PARTITION_PROPERTIES, null, PARTITION_TYPES);
for (int i = 0; i < hwPartitionQueryMap.get(NAME_PROPERTY).size(); i++) {
String deviceID = (String) hwPartitionQueryMap.get(DEVICE_ID_PROPERTY).get(i);
String logicalDrive = MapUtil.getOrDefault(partitionToLogicalDriveMap, deviceID, "");
String uuid = "";
if (!logicalDrive.isEmpty()) {
// Get matching volume for UUID
char[] volumeChr = new char[BUFSIZE];
Kernel32.INSTANCE.GetVolumeNameForVolumeMountPoint(logicalDrive, volumeChr, BUFSIZE);
uuid = ParseUtil.parseUuidOrDefault(new String(volumeChr).trim(), "");
}
partitionMap.put(deviceID,
new HWPartition((String) hwPartitionQueryMap.get(NAME_PROPERTY).get(i),
(String) hwPartitionQueryMap.get(TYPE_PROPERTY).get(i),
(String) hwPartitionQueryMap.get(DESCRIPTION_PROPERTY).get(i), uuid,
ParseUtil.parseLongOrDefault((String) hwPartitionQueryMap.get(SIZE_PROPERTY).get(i), 0L),
((Long) hwPartitionQueryMap.get(DISK_INDEX_PROPERTY).get(i)).intValue(),
((Long) hwPartitionQueryMap.get(INDEX_PROPERTY).get(i)).intValue(), logicalDrive));
}
}
#location 14
#vulnerability type NULL_DEREFERENCE
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
private List<OSFileStore> getWmiVolumes() {
Map<String, List<Object>> drives;
List<OSFileStore> fs;
String volume;
long free;
long total;
fs = new ArrayList<>();
drives = WmiUtil.selectObjectsFrom(null, "Win32_LogicalDisk", FS_PROPERTIES, null, FS_TYPES);
for (int i = 0; i < drives.get(NAME_PROPERTY).size(); i++) {
free = (Long) drives.get(FREESPACE_PROPERTY).get(i);
total = (Long) drives.get(SIZE_PROPERTY).get(i);
String description = (String) drives.get(DESCRIPTION_PROPERTY).get(i);
String name = (String) drives.get(NAME_PROPERTY).get(i);
long type = (Long) drives.get(DRIVE_TYPE_PROPERTY).get(i);
if (type != 4) {
char[] chrVolume = new char[BUFSIZE];
Kernel32.INSTANCE.GetVolumeNameForVolumeMountPoint(name + "\\", chrVolume, BUFSIZE);
volume = new String(chrVolume).trim();
} else {
volume = (String) drives.get(PROVIDER_NAME_PROPERTY).get(i);
String[] split = volume.split("\\\\");
if (split.length > 1 && split[split.length - 1].length() > 0) {
description = split[split.length - 1];
}
}
fs.add(new OSFileStore(String.format("%s (%s)", description, name), volume, name + "\\", getDriveType(name),
(String) drives.get(FILESYSTEM_PROPERTY).get(i), "", free, total));
}
return fs;
}
|
#vulnerable code
private List<OSFileStore> getWmiVolumes() {
Map<String, List<String>> drives;
List<OSFileStore> fs;
String volume;
long free;
long total;
fs = new ArrayList<>();
drives = WmiUtil.selectStringsFrom(null, "Win32_LogicalDisk",
"Name,Description,ProviderName,FileSystem,Freespace,Size", null);
for (int i = 0; i < drives.get("Name").size(); i++) {
free = ParseUtil.parseLongOrDefault(drives.get("Freespace").get(i), 0L);
total = ParseUtil.parseLongOrDefault(drives.get("Size").get(i), 0L);
String description = drives.get("Description").get(i);
long type = WmiUtil.selectUint32From(null, "Win32_LogicalDisk", "DriveType",
"WHERE Name = '" + drives.get("Name").get(i) + "'");
if (type != 4) {
char[] chrVolume = new char[BUFSIZE];
Kernel32.INSTANCE.GetVolumeNameForVolumeMountPoint(drives.get("Name").get(i) + "\\", chrVolume,
BUFSIZE);
volume = new String(chrVolume).trim();
} else {
volume = drives.get("ProviderName").get(i);
String[] split = volume.split("\\\\");
if (split.length > 1 && split[split.length - 1].length() > 0) {
description = split[split.length - 1];
}
}
fs.add(new OSFileStore(String.format("%s (%s)", description, drives.get("Name").get(i)), volume,
drives.get("Name").get(i) + "\\", getDriveType(drives.get("Name").get(i)),
drives.get("FileSystem").get(i), "", free, total));
}
return fs;
}
#location 13
#vulnerability type NULL_DEREFERENCE
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
public void removeAllCounters() {
// Remove all counters from counterHandle map
for (HANDLEByReference href : counterHandleMap.values()) {
PerfDataUtil.removeCounter(href);
}
counterHandleMap.clear();
// Remove query
if (this.queryHandle != null) {
PerfDataUtil.closeQuery(this.queryHandle);
}
this.queryHandle = null;
}
|
#vulnerable code
public void removeAllCounters() {
// Remove all counter handles
for (HANDLEByReference href : counterHandleMap.values()) {
PerfDataUtil.removeCounter(href);
}
counterHandleMap.clear();
// Remove all queries
for (HANDLEByReference query : queryHandleMap.values()) {
PerfDataUtil.closeQuery(query);
}
queryHandleMap.clear();
queryCounterMap.clear();
}
#location 9
#vulnerability type INTERFACE_NOT_THREAD_SAFE
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
public Processor[] getProcessors() {
if (_processors == null) {
List<Processor> processors = new ArrayList<Processor>();
List<String> cpuInfo = null;
try {
cpuInfo = FileUtil.readFile("/proc/cpuinfo");
} catch (IOException e) {
System.err.println("Problem with: /proc/cpuinfo");
System.err.println(e.getMessage());
return null;
}
CentralProcessor cpu = null;
for (String toBeAnalyzed : cpuInfo) {
if (toBeAnalyzed.equals("")) {
if (cpu != null) {
processors.add(cpu);
}
cpu = null;
continue;
}
if (cpu == null) {
cpu = new CentralProcessor();
}
if (toBeAnalyzed.startsWith("model name\t")) {
cpu.setName(toBeAnalyzed.split(SEPARATOR)[1]); // model
// name
continue;
}
if (toBeAnalyzed.startsWith("flags\t")) {
String[] flags = toBeAnalyzed.split(SEPARATOR)[1]
.split(" ");
boolean found = false;
for (String flag : flags) {
if (flag.equalsIgnoreCase("LM")) {
found = true;
break;
}
}
cpu.setCpu64(found);
continue;
}
if (toBeAnalyzed.startsWith("cpu family\t")) {
cpu.setFamily(toBeAnalyzed.split(SEPARATOR)[1]); // model
// name
continue;
}
if (toBeAnalyzed.startsWith("model\t")) {
cpu.setModel(toBeAnalyzed.split(SEPARATOR)[1]); // model
// name
continue;
}
if (toBeAnalyzed.startsWith("stepping\t")) {
cpu.setStepping(toBeAnalyzed.split(SEPARATOR)[1]); // model
// name
continue;
}
if (toBeAnalyzed.startsWith("vendor_id")) {
cpu.setVendor(toBeAnalyzed.split(SEPARATOR)[1]); // vendor_id
continue;
}
}
if (cpu != null) {
processors.add(cpu);
}
_processors = processors.toArray(new Processor[0]);
}
return _processors;
}
|
#vulnerable code
public Processor[] getProcessors() {
if (_processors == null) {
List<Processor> processors = new ArrayList<Processor>();
Scanner in = null;
try {
in = new Scanner(new FileReader("/proc/cpuinfo"));
} catch (FileNotFoundException e) {
System.err.println("Problem with: /proc/cpuinfo");
System.err.println(e.getMessage());
return null;
}
in.useDelimiter("\n");
CentralProcessor cpu = null;
while (in.hasNext()) {
String toBeAnalyzed = in.next();
if (toBeAnalyzed.equals("")) {
if (cpu != null) {
processors.add(cpu);
}
cpu = null;
continue;
}
if (cpu == null) {
cpu = new CentralProcessor();
}
if (toBeAnalyzed.startsWith("model name\t")) {
cpu.setName(toBeAnalyzed.split(SEPARATOR)[1]); // model
// name
continue;
}
if (toBeAnalyzed.startsWith("flags\t")) {
String[] flags = toBeAnalyzed.split(SEPARATOR)[1]
.split(" ");
boolean found = false;
for (String flag : flags) {
if (flag.equalsIgnoreCase("LM")) {
found = true;
break;
}
}
cpu.setCpu64(found);
continue;
}
if (toBeAnalyzed.startsWith("cpu family\t")) {
cpu.setFamily(toBeAnalyzed.split(SEPARATOR)[1]); // model
// name
continue;
}
if (toBeAnalyzed.startsWith("model\t")) {
cpu.setModel(toBeAnalyzed.split(SEPARATOR)[1]); // model
// name
continue;
}
if (toBeAnalyzed.startsWith("stepping\t")) {
cpu.setStepping(toBeAnalyzed.split(SEPARATOR)[1]); // model
// name
continue;
}
if (toBeAnalyzed.startsWith("vendor_id")) {
cpu.setVendor(toBeAnalyzed.split(SEPARATOR)[1]); // vendor_id
continue;
}
}
in.close();
if (cpu != null) {
processors.add(cpu);
}
_processors = processors.toArray(new Processor[0]);
}
return _processors;
}
#location 65
#vulnerability type RESOURCE_LEAK
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
@Override
public long[] querySystemCpuLoadTicks() {
// To get load in processor group scenario, we need perfmon counters, but the
// _Total instance is an average rather than total (scaled) number of ticks
// which matches GetSystemTimes() results. We can just query the per-processor
// ticks and add them up. Calling the get() method gains the benefit of
// synchronizing this output with the memoized result of per-processor ticks as
// well.
long[] ticks = new long[TickType.values().length];
// Sum processor ticks
long[][] procTicks = getProcessorCpuLoadTicks();
for (int i = 0; i < ticks.length; i++) {
for (long[] procTick : procTicks) {
ticks[i] += procTick[i];
}
}
return ticks;
}
|
#vulnerable code
@Override
public long[] querySystemCpuLoadTicks() {
long[] ticks = new long[TickType.values().length];
WinBase.FILETIME lpIdleTime = new WinBase.FILETIME();
WinBase.FILETIME lpKernelTime = new WinBase.FILETIME();
WinBase.FILETIME lpUserTime = new WinBase.FILETIME();
if (!Kernel32.INSTANCE.GetSystemTimes(lpIdleTime, lpKernelTime, lpUserTime)) {
LOG.error("Failed to update system idle/kernel/user times. Error code: {}", Native.getLastError());
return ticks;
}
// IOwait:
// Windows does not measure IOWait.
// IRQ and ticks:
// Percent time raw value is cumulative 100NS-ticks
// Divide by 10_000 to get milliseconds
Map<SystemTickCountProperty, Long> valueMap = ProcessorInformation.querySystemCounters();
ticks[TickType.IRQ.getIndex()] = valueMap.getOrDefault(SystemTickCountProperty.PERCENTINTERRUPTTIME, 0L)
/ 10_000L;
ticks[TickType.SOFTIRQ.getIndex()] = valueMap.getOrDefault(SystemTickCountProperty.PERCENTDPCTIME, 0L)
/ 10_000L;
ticks[TickType.IDLE.getIndex()] = lpIdleTime.toDWordLong().longValue() / 10_000L;
ticks[TickType.SYSTEM.getIndex()] = lpKernelTime.toDWordLong().longValue() / 10_000L
- ticks[TickType.IDLE.getIndex()];
ticks[TickType.USER.getIndex()] = lpUserTime.toDWordLong().longValue() / 10_000L;
// Additional decrement to avoid double counting in the total array
ticks[TickType.SYSTEM.getIndex()] -= ticks[TickType.IRQ.getIndex()] + ticks[TickType.SOFTIRQ.getIndex()];
return ticks;
}
#location 7
#vulnerability type INTERFACE_NOT_THREAD_SAFE
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
@Override
public int[] getFanSpeeds() {
// Initialize
int[] fanSpeeds = new int[1];
// If we couldn't get through normal WMI go directly to OHM
if (!this.fanSpeedWMI) {
double[] vals = wmiGetValuesForKeys("/namespace:\\\\root\\OpenHardwareMonitor PATH Sensor", null, "Fan",
"Parent,SensorType,Value");
if (vals.length > 0) {
fanSpeeds = new int[vals.length];
for (int i = 0; i < vals.length; i++) {
fanSpeeds[i] = (int) vals[i];
}
}
return fanSpeeds;
}
// This branch is used the first time and all subsequent times if
// successful (fanSpeedWMI == true)
// Try to get value
int rpm = wmiGetValue("/namespace:\\\\root\\cimv2 PATH Win32_Fan", "DesiredSpeed");
// Set in array and return
if (rpm > 0) {
fanSpeeds[0] = rpm;
} else {
// Fail, switch to OHM
this.fanSpeedWMI = false;
return getFanSpeeds();
}
return fanSpeeds;
}
|
#vulnerable code
@Override
public int[] getFanSpeeds() {
int[] fanSpeeds = new int[1];
ArrayList<String> hwInfo = ExecutingCommand
.runNative("wmic /namespace:\\\\root\\cimv2 PATH Win32_Fan get DesiredSpeed");
for (String checkLine : hwInfo) {
if (checkLine.length() == 0 || checkLine.toLowerCase().contains("desiredspeed")) {
continue;
} else {
// If successful
try {
int rpm = Integer.parseInt(checkLine.trim());
// Check if 8th bit (of 16 bit number) is set
if (rpm > 0) {
fanSpeeds[0] = rpm;
return fanSpeeds;
}
} catch (NumberFormatException e) {
// If we failed to parse, give up
}
break;
}
}
return fanSpeeds;
}
#location 6
#vulnerability type NULL_DEREFERENCE
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
private static void enumerateProperties(Map<String, List<Object>> values, EnumWbemClassObject enumerator,
String[] properties, ValueType[] propertyTypes) {
if (propertyTypes.length > 1 && properties.length != propertyTypes.length) {
throw new IllegalArgumentException("Property type array size must be 1 or equal to properties array size.");
}
// Step 7: -------------------------------------------------
// Get the data from the query in step 6 -------------------
PointerByReference pclsObj = new PointerByReference();
LongByReference uReturn = new LongByReference(0L);
while (enumerator.getPointer() != Pointer.NULL) {
HRESULT hres = enumerator.Next(new NativeLong(EnumWbemClassObject.WBEM_INFINITE), new NativeLong(1),
pclsObj, uReturn);
// Requested 1; if 0 objects returned, we're done
if (0L == uReturn.getValue() || COMUtils.FAILED(hres)) {
// Enumerator will be released by calling method so no need to
// release it here.
return;
}
VARIANT.ByReference vtProp = new VARIANT.ByReference();
// Get the value of the properties
WbemClassObject clsObj = new WbemClassObject(pclsObj.getValue());
for (int p = 0; p < properties.length; p++) {
String property = properties[p];
hres = clsObj.Get(new BSTR(property), new NativeLong(0L), vtProp, null, null);
ValueType propertyType = propertyTypes.length > 1 ? propertyTypes[p] : propertyTypes[0];
switch (propertyType) {
// WMI Longs will return as strings
case STRING:
values.get(property).add(vtProp.getValue() == null ? "unknown" : vtProp.stringValue());
break;
// WMI Uint32s will return as longs
case UINT32: // WinDef.LONG TODO improve in JNA 4.3
case UINT64:
values.get(property)
.add(vtProp.getValue() == null ? 0L : vtProp._variant.__variant.lVal.longValue());
break;
case FLOAT:
values.get(property).add(vtProp.getValue() == null ? 0f : vtProp.floatValue());
break;
case DATETIME:
// Read a string in format 20160513072950.782000-420 and
// parse to a long representing ms since eopch
values.get(property)
.add(vtProp.getValue() == null ? 0L : ParseUtil.cimDateTimeToMillis(vtProp.stringValue()));
break;
case BOOLEAN: // WinDef.BOOL TODO improve in JNA 4.3
values.get(property)
.add(vtProp.getValue() == null ? 0L : vtProp._variant.__variant.boolVal.booleanValue());
break;
default:
// Should never get here! If you get this exception you've
// added something to the enum without adding it here. Tsk.
throw new IllegalArgumentException("Unimplemented enum type: " + propertyType.toString());
}
OleAuto.INSTANCE.VariantClear(vtProp.getPointer());
}
clsObj.Release();
}
}
|
#vulnerable code
private static void enumerateProperties(Map<String, List<Object>> values, EnumWbemClassObject enumerator,
String[] properties, ValueType[] propertyTypes) {
if (propertyTypes.length > 1 && properties.length != propertyTypes.length) {
throw new IllegalArgumentException("Property type array size must be 1 or equal to properties array size.");
}
// Step 7: -------------------------------------------------
// Get the data from the query in step 6 -------------------
PointerByReference pclsObj = new PointerByReference();
LongByReference uReturn = new LongByReference(0L);
while (enumerator.getPointer() != Pointer.NULL) {
HRESULT hres = enumerator.Next(new NativeLong(EnumWbemClassObject.WBEM_INFINITE), new NativeLong(1),
pclsObj, uReturn);
// Requested 1; if 0 objects returned, we're done
if (0L == uReturn.getValue() || COMUtils.FAILED(hres)) {
// Enumerator will be released by calling method so no need to
// release it here.
return;
}
VARIANT.ByReference vtProp = new VARIANT.ByReference();
// Get the value of the properties
WbemClassObject clsObj = new WbemClassObject(pclsObj.getValue());
for (int p = 0; p < properties.length; p++) {
String property = properties[p];
hres = clsObj.Get(new BSTR(property), new NativeLong(0L), vtProp, null, null);
ValueType propertyType = propertyTypes.length > 1 ? propertyTypes[p] : propertyTypes[0];
switch (propertyType) {
// WMI Longs will return as strings
case STRING:
values.get(property).add(vtProp.getValue() == null ? "unknown" : vtProp.stringValue());
break;
// WMI Uint32s will return as longs
case UINT32: // WinDef.LONG TODO improve in JNA 4.3
values.get(property)
.add(vtProp.getValue() == null ? 0L : vtProp._variant.__variant.lVal.longValue());
break;
case FLOAT:
values.get(property).add(vtProp.getValue() == null ? 0f : vtProp.floatValue());
break;
case DATETIME:
// Read a string in format 20160513072950.782000-420 and
// parse to a long representing ms since eopch
values.get(property)
.add(vtProp.getValue() == null ? 0L : ParseUtil.cimDateTimeToMillis(vtProp.stringValue()));
break;
case BOOLEAN: // WinDef.BOOL TODO improve in JNA 4.3
values.get(property)
.add(vtProp.getValue() == null ? 0L : vtProp._variant.__variant.boolVal.booleanValue());
break;
default:
// Should never get here! If you get this exception you've
// added something to the enum without adding it here. Tsk.
throw new IllegalArgumentException("Unimplemented enum type: " + propertyType.toString());
}
OleAuto.INSTANCE.VariantClear(vtProp.getPointer());
}
clsObj.Release();
}
}
#location 31
#vulnerability type NULL_DEREFERENCE
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
private List<OSFileStore> getWmiVolumes() {
Map<String, List<Object>> drives;
List<OSFileStore> fs;
String volume;
long free;
long total;
fs = new ArrayList<>();
drives = WmiUtil.selectObjectsFrom(null, "Win32_LogicalDisk", FS_PROPERTIES, null, FS_TYPES);
for (int i = 0; i < drives.get(NAME_PROPERTY).size(); i++) {
free = (Long) drives.get(FREESPACE_PROPERTY).get(i);
total = (Long) drives.get(SIZE_PROPERTY).get(i);
String description = (String) drives.get(DESCRIPTION_PROPERTY).get(i);
String name = (String) drives.get(NAME_PROPERTY).get(i);
long type = (Long) drives.get(DRIVE_TYPE_PROPERTY).get(i);
if (type != 4) {
char[] chrVolume = new char[BUFSIZE];
Kernel32.INSTANCE.GetVolumeNameForVolumeMountPoint(name + "\\", chrVolume, BUFSIZE);
volume = new String(chrVolume).trim();
} else {
volume = (String) drives.get(PROVIDER_NAME_PROPERTY).get(i);
String[] split = volume.split("\\\\");
if (split.length > 1 && split[split.length - 1].length() > 0) {
description = split[split.length - 1];
}
}
fs.add(new OSFileStore(String.format("%s (%s)", description, name), volume, name + "\\", getDriveType(name),
(String) drives.get(FILESYSTEM_PROPERTY).get(i), "", free, total));
}
return fs;
}
|
#vulnerable code
private List<OSFileStore> getWmiVolumes() {
Map<String, List<String>> drives;
List<OSFileStore> fs;
String volume;
long free;
long total;
fs = new ArrayList<>();
drives = WmiUtil.selectStringsFrom(null, "Win32_LogicalDisk",
"Name,Description,ProviderName,FileSystem,Freespace,Size", null);
for (int i = 0; i < drives.get("Name").size(); i++) {
free = ParseUtil.parseLongOrDefault(drives.get("Freespace").get(i), 0L);
total = ParseUtil.parseLongOrDefault(drives.get("Size").get(i), 0L);
String description = drives.get("Description").get(i);
long type = WmiUtil.selectUint32From(null, "Win32_LogicalDisk", "DriveType",
"WHERE Name = '" + drives.get("Name").get(i) + "'");
if (type != 4) {
char[] chrVolume = new char[BUFSIZE];
Kernel32.INSTANCE.GetVolumeNameForVolumeMountPoint(drives.get("Name").get(i) + "\\", chrVolume,
BUFSIZE);
volume = new String(chrVolume).trim();
} else {
volume = drives.get("ProviderName").get(i);
String[] split = volume.split("\\\\");
if (split.length > 1 && split[split.length - 1].length() > 0) {
description = split[split.length - 1];
}
}
fs.add(new OSFileStore(String.format("%s (%s)", description, drives.get("Name").get(i)), volume,
drives.get("Name").get(i) + "\\", getDriveType(drives.get("Name").get(i)),
drives.get("FileSystem").get(i), "", free, total));
}
return fs;
}
#location 14
#vulnerability type NULL_DEREFERENCE
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
@Override
public long[] querySystemCpuLoadTicks() {
// convert the Linux Jiffies to Milliseconds.
long[] ticks = CpuStat.getSystemCpuLoadTicks();
// In rare cases, /proc/stat reading fails. If so, try again.
if (LongStream.of(ticks).sum() == 0) {
ticks = CpuStat.getSystemCpuLoadTicks();
}
long hz = LinuxOperatingSystem.getHz();
for (int i = 0; i < ticks.length; i++) {
ticks[i] = ticks[i] * 1000L / hz;
}
return ticks;
}
|
#vulnerable code
@Override
public long[] querySystemCpuLoadTicks() {
// convert the Linux Jiffies to Milliseconds.
long[] ticks = CpuStat.getSystemCpuLoadTicks();
long hz = LinuxOperatingSystem.getHz();
for (int i = 0; i < ticks.length; i++) {
ticks[i] = ticks[i] * 1000L / hz;
}
return ticks;
}
#location 4
#vulnerability type INTERFACE_NOT_THREAD_SAFE
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
public void removeAllCounters() {
// Remove all counters from counterHandle map
for (HANDLEByReference href : counterHandleMap.values()) {
PerfDataUtil.removeCounter(href);
}
counterHandleMap.clear();
// Remove query
if (this.queryHandle != null) {
PerfDataUtil.closeQuery(this.queryHandle);
}
this.queryHandle = null;
}
|
#vulnerable code
public void removeAllCounters() {
// Remove all counter handles
for (HANDLEByReference href : counterHandleMap.values()) {
PerfDataUtil.removeCounter(href);
}
counterHandleMap.clear();
// Remove all queries
for (HANDLEByReference query : queryHandleMap.values()) {
PerfDataUtil.closeQuery(query);
}
queryHandleMap.clear();
queryCounterMap.clear();
}
#location 4
#vulnerability type INTERFACE_NOT_THREAD_SAFE
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
private static String getCardCodec(File cardDir) {
String cardCodec = "";
File[] cardFiles = cardDir.listFiles();
if (cardFiles == null) {
return "";
}
for (File file : cardDir.listFiles()) {
if (file.getName().startsWith("codec")) {
cardCodec = FileUtil.getKeyValueMapFromFile(file.getPath(), ":").get("Codec");
}
}
return cardCodec;
}
|
#vulnerable code
private static String getCardCodec(File cardDir) {
String cardCodec = "";
for (File file : cardDir.listFiles()) {
if (file.getName().startsWith("codec")) {
cardCodec = FileUtil.getKeyValueMapFromFile(file.getPath(), ":").get("Codec");
}
}
return cardCodec;
}
#location 3
#vulnerability type NULL_DEREFERENCE
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
private static void enumerateProperties(Map<String, List<Object>> values, EnumWbemClassObject enumerator,
String[] properties, ValueType[] propertyTypes) {
if (propertyTypes.length > 1 && properties.length != propertyTypes.length) {
throw new IllegalArgumentException("Property type array size must be 1 or equal to properties array size.");
}
// Step 7: -------------------------------------------------
// Get the data from the query in step 6 -------------------
PointerByReference pclsObj = new PointerByReference();
LongByReference uReturn = new LongByReference(0L);
while (enumerator.getPointer() != Pointer.NULL) {
HRESULT hres = enumerator.Next(new NativeLong(EnumWbemClassObject.WBEM_INFINITE), new NativeLong(1),
pclsObj, uReturn);
// Requested 1; if 0 objects returned, we're done
if (0L == uReturn.getValue() || COMUtils.FAILED(hres)) {
// Enumerator will be released by calling method so no need to
// release it here.
return;
}
VARIANT.ByReference vtProp = new VARIANT.ByReference();
// Get the value of the properties
WbemClassObject clsObj = new WbemClassObject(pclsObj.getValue());
for (int p = 0; p < properties.length; p++) {
String property = properties[p];
hres = clsObj.Get(new BSTR(property), new NativeLong(0L), vtProp, null, null);
ValueType propertyType = propertyTypes.length > 1 ? propertyTypes[p] : propertyTypes[0];
switch (propertyType) {
// WMI Longs will return as strings
case STRING:
values.get(property).add(vtProp.getValue() == null ? "unknown" : vtProp.stringValue());
break;
// WMI Uint32s will return as longs
case UINT32: // WinDef.LONG TODO improve in JNA 4.3
values.get(property)
.add(vtProp.getValue() == null ? 0L : vtProp._variant.__variant.lVal.longValue());
break;
case FLOAT:
values.get(property).add(vtProp.getValue() == null ? 0f : vtProp.floatValue());
break;
case DATETIME:
// Read a string in format 20160513072950.782000-420 and
// parse to a long representing ms since eopch
values.get(property).add(ParseUtil.cimDateTimeToMillis(vtProp.stringValue()));
break;
default:
// Should never get here! If you get this exception you've
// added something to the enum without adding it here. Tsk.
throw new IllegalArgumentException("Unimplemented enum type: " + propertyType.toString());
}
OleAuto.INSTANCE.VariantClear(vtProp.getPointer());
}
clsObj.Release();
}
}
|
#vulnerable code
private static void enumerateProperties(Map<String, List<Object>> values, EnumWbemClassObject enumerator,
String[] properties, ValueType[] propertyTypes) {
if (propertyTypes.length > 1 && properties.length != propertyTypes.length) {
throw new IllegalArgumentException("Property type array size must be 1 or equal to properties array size.");
}
// Step 7: -------------------------------------------------
// Get the data from the query in step 6 -------------------
PointerByReference pclsObj = new PointerByReference();
LongByReference uReturn = new LongByReference(0L);
while (enumerator.getPointer() != Pointer.NULL) {
HRESULT hres = enumerator.Next(new NativeLong(EnumWbemClassObject.WBEM_INFINITE), new NativeLong(1),
pclsObj, uReturn);
// Requested 1; if 0 objects returned, we're done
if (0L == uReturn.getValue() || COMUtils.FAILED(hres)) {
enumerator.Release();
return;
}
VARIANT.ByReference vtProp = new VARIANT.ByReference();
// Get the value of the properties
WbemClassObject clsObj = new WbemClassObject(pclsObj.getValue());
for (int p = 0; p < properties.length; p++) {
String property = properties[p];
hres = clsObj.Get(new BSTR(property), new NativeLong(0L), vtProp, null, null);
ValueType propertyType = propertyTypes.length > 1 ? propertyTypes[p] : propertyTypes[0];
switch (propertyType) {
// WMI Longs will return as strings
case STRING:
values.get(property).add(vtProp.getValue() == null ? "unknown" : vtProp.stringValue());
break;
// WMI Uint32s will return as longs
case UINT32: // WinDef.LONG TODO improve in JNA 4.3
values.get(property)
.add(vtProp.getValue() == null ? 0L : vtProp._variant.__variant.lVal.longValue());
break;
case FLOAT:
values.get(property).add(vtProp.getValue() == null ? 0f : vtProp.floatValue());
break;
case DATETIME:
// Read a string in format 20160513072950.782000-420 and
// parse to a long representing ms since eopch
values.get(property).add(ParseUtil.cimDateTimeToMillis(vtProp.stringValue()));
break;
default:
// Should never get here! If you get this exception you've
// added something to the enum without adding it here. Tsk.
throw new IllegalArgumentException("Unimplemented enum type: " + propertyType.toString());
}
OleAuto.INSTANCE.VariantClear(vtProp.getPointer());
}
clsObj.Release();
}
}
#location 30
#vulnerability type NULL_DEREFERENCE
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
public static <T extends Enum<T>> Map<T, Long> queryValues(Class<T> propertyEnum, String perfObject,
String perfWmiClass) {
// Check without locking for performance
if (!failedQueryCache.contains(perfObject)) {
failedQueryCacheLock.lock();
try {
// Double check lock
if (!failedQueryCache.contains(perfObject)) {
Map<T, Long> valueMap = queryValuesFromPDH(propertyEnum, perfObject);
if (!valueMap.isEmpty()) {
return valueMap;
}
// If we are here, query failed
LOG.warn("Disabling further attempts to query {}.", perfObject);
failedQueryCache.add(perfObject);
}
} finally {
failedQueryCacheLock.unlock();
}
}
return queryValuesFromWMI(propertyEnum, perfWmiClass);
}
|
#vulnerable code
public static <T extends Enum<T>> Map<T, Long> queryValues(Class<T> propertyEnum, String perfObject,
String perfWmiClass) {
Map<T, Long> valueMap = queryValuesFromPDH(propertyEnum, perfObject);
if (valueMap.isEmpty()) {
return queryValuesFromWMI(propertyEnum, perfWmiClass);
}
return valueMap;
}
#location 3
#vulnerability type INTERFACE_NOT_THREAD_SAFE
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
private List<OSFileStore> getWmiVolumes() {
Map<String, List<Object>> drives;
List<OSFileStore> fs;
String volume;
long free;
long total;
fs = new ArrayList<>();
drives = WmiUtil.selectObjectsFrom(null, "Win32_LogicalDisk", FS_PROPERTIES, null, FS_TYPES);
for (int i = 0; i < drives.get(NAME_PROPERTY).size(); i++) {
free = (Long) drives.get(FREESPACE_PROPERTY).get(i);
total = (Long) drives.get(SIZE_PROPERTY).get(i);
String description = (String) drives.get(DESCRIPTION_PROPERTY).get(i);
String name = (String) drives.get(NAME_PROPERTY).get(i);
long type = (Long) drives.get(DRIVE_TYPE_PROPERTY).get(i);
if (type != 4) {
char[] chrVolume = new char[BUFSIZE];
Kernel32.INSTANCE.GetVolumeNameForVolumeMountPoint(name + "\\", chrVolume, BUFSIZE);
volume = new String(chrVolume).trim();
} else {
volume = (String) drives.get(PROVIDER_NAME_PROPERTY).get(i);
String[] split = volume.split("\\\\");
if (split.length > 1 && split[split.length - 1].length() > 0) {
description = split[split.length - 1];
}
}
fs.add(new OSFileStore(String.format("%s (%s)", description, name), volume, name + "\\", getDriveType(name),
(String) drives.get(FILESYSTEM_PROPERTY).get(i), "", free, total));
}
return fs;
}
|
#vulnerable code
private List<OSFileStore> getWmiVolumes() {
Map<String, List<String>> drives;
List<OSFileStore> fs;
String volume;
long free;
long total;
fs = new ArrayList<>();
drives = WmiUtil.selectStringsFrom(null, "Win32_LogicalDisk",
"Name,Description,ProviderName,FileSystem,Freespace,Size", null);
for (int i = 0; i < drives.get("Name").size(); i++) {
free = ParseUtil.parseLongOrDefault(drives.get("Freespace").get(i), 0L);
total = ParseUtil.parseLongOrDefault(drives.get("Size").get(i), 0L);
String description = drives.get("Description").get(i);
long type = WmiUtil.selectUint32From(null, "Win32_LogicalDisk", "DriveType",
"WHERE Name = '" + drives.get("Name").get(i) + "'");
if (type != 4) {
char[] chrVolume = new char[BUFSIZE];
Kernel32.INSTANCE.GetVolumeNameForVolumeMountPoint(drives.get("Name").get(i) + "\\", chrVolume,
BUFSIZE);
volume = new String(chrVolume).trim();
} else {
volume = drives.get("ProviderName").get(i);
String[] split = volume.split("\\\\");
if (split.length > 1 && split[split.length - 1].length() > 0) {
description = split[split.length - 1];
}
}
fs.add(new OSFileStore(String.format("%s (%s)", description, drives.get("Name").get(i)), volume,
drives.get("Name").get(i) + "\\", getDriveType(drives.get("Name").get(i)),
drives.get("FileSystem").get(i), "", free, total));
}
return fs;
}
#location 19
#vulnerability type NULL_DEREFERENCE
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
@Override
public HWDiskStore[] getDisks() {
List<HWDiskStore> result;
result = new ArrayList<>();
readMap.clear();
writeMap.clear();
populateReadWriteMaps();
Map<String, List<Object>> vals = WmiUtil.selectObjectsFrom(null, "Win32_DiskDrive",
"Name,Manufacturer,Model,SerialNumber,Size,Index", null, DRIVE_TYPES);
for (int i = 0; i < vals.get("Name").size(); i++) {
HWDiskStore ds = new HWDiskStore();
ds.setName((String) vals.get("Name").get(i));
ds.setModel(String.format("%s %s", vals.get("Model").get(i), vals.get("Manufacturer").get(i)).trim());
// Most vendors store serial # as a hex string; convert
ds.setSerial(ParseUtil.hexStringToString((String) vals.get("SerialNumber").get(i)));
String index = vals.get("Index").get(i).toString();
if (readMap.containsKey(index)) {
ds.setReads(readMap.get(index));
}
if (writeMap.containsKey(index)) {
ds.setWrites(writeMap.get(index));
}
// If successful this line is the desired value
try {
ds.setSize(Long.parseLong((String) vals.get("Size").get(i)));
} catch (NumberFormatException e) {
// If we failed to parse, give up
// This is expected for an empty string on some drives
ds.setSize(0L);
}
result.add(ds);
}
return result.toArray(new HWDiskStore[result.size()]);
}
|
#vulnerable code
@Override
public HWDiskStore[] getDisks() {
List<HWDiskStore> result;
result = new ArrayList<>();
Map<String, List<String>> vals = WmiUtil.selectStringsFrom(null, "Win32_DiskDrive",
"Name,Manufacturer,Model,SerialNumber,Size", null);
for (int i = 0; i < vals.get("Name").size(); i++) {
HWDiskStore ds = new HWDiskStore();
ds.setName(vals.get("Name").get(i));
ds.setModel(String.format("%s %s", vals.get("Model").get(i), vals.get("Manufacturer").get(i)).trim());
// Most vendors store serial # as a hex string; convert
ds.setSerial(ParseUtil.hexStringToString(vals.get("SerialNumber").get(i)));
// If successful this line is the desired value
try {
ds.setSize(Long.parseLong(vals.get("Size").get(i)));
} catch (NumberFormatException e) {
// If we failed to parse, give up
// This is expected for an empty string on some drives
ds.setSize(0L);
}
result.add(ds);
}
return result.toArray(new HWDiskStore[result.size()]);
}
#location 8
#vulnerability type NULL_DEREFERENCE
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
public static <T extends Enum<T>> Pair<List<String>, Map<T, List<Long>>> queryInstancesAndValues(
Class<T> propertyEnum, String perfObject, String perfWmiClass) {
// Check without locking for performance
if (!failedQueryCache.contains(perfObject)) {
failedQueryCacheLock.lock();
try {
// Double check lock
if (!failedQueryCache.contains(perfObject)) {
Pair<List<String>, Map<T, List<Long>>> instancesAndValuesMap = queryInstancesAndValuesFromPDH(
propertyEnum, perfObject);
if (!instancesAndValuesMap.getA().isEmpty()) {
return instancesAndValuesMap;
}
// If we are here, query failed
LOG.warn("Disabling further attempts to query {}.", perfObject);
failedQueryCache.add(perfObject);
}
} finally {
failedQueryCacheLock.unlock();
}
}
return queryInstancesAndValuesFromWMI(propertyEnum, perfWmiClass);
}
|
#vulnerable code
public static <T extends Enum<T>> Pair<List<String>, Map<T, List<Long>>> queryInstancesAndValues(
Class<T> propertyEnum, String perfObject, String perfWmiClass) {
Pair<List<String>, Map<T, List<Long>>> instancesAndValuesMap = queryInstancesAndValuesFromPDH(propertyEnum,
perfObject);
if (instancesAndValuesMap.getA().isEmpty()) {
return queryInstancesAndValuesFromWMI(propertyEnum, perfWmiClass);
}
return instancesAndValuesMap;
}
#location 3
#vulnerability type INTERFACE_NOT_THREAD_SAFE
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
protected void updateMeminfo() {
long now = System.currentTimeMillis();
if (now - this.lastUpdate > 100) {
if (!Psapi.INSTANCE.GetPerformanceInfo(perfInfo, perfInfo.size())) {
LOG.error("Failed to get Performance Info. Error code: {}", Kernel32.INSTANCE.GetLastError());
return;
}
this.memAvailable = perfInfo.PageSize.longValue() * perfInfo.PhysicalAvailable.longValue();
this.memTotal = perfInfo.PageSize.longValue() * perfInfo.PhysicalTotal.longValue();
this.swapTotal = perfInfo.PageSize.longValue()
* (perfInfo.CommitLimit.longValue() - perfInfo.PhysicalTotal.longValue());
this.lastUpdate = now;
}
}
|
#vulnerable code
protected void updateMeminfo() {
long now = System.currentTimeMillis();
if (now - this.lastUpdate > 100) {
if (!Psapi.INSTANCE.GetPerformanceInfo(perfInfo, perfInfo.size())) {
LOG.error("Failed to get Performance Info. Error code: {}", Kernel32.INSTANCE.GetLastError());
this.perfInfo = null;
}
this.memAvailable = perfInfo.PageSize.longValue() * perfInfo.PhysicalAvailable.longValue();
this.memTotal = perfInfo.PageSize.longValue() * perfInfo.PhysicalTotal.longValue();
this.swapTotal = perfInfo.PageSize.longValue()
* (perfInfo.CommitLimit.longValue() - perfInfo.PhysicalTotal.longValue());
this.lastUpdate = now;
}
}
#location 8
#vulnerability type NULL_DEREFERENCE
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
private List<OSFileStore> getWmiVolumes() {
Map<String, List<Object>> drives;
List<OSFileStore> fs;
String volume;
long free;
long total;
fs = new ArrayList<>();
drives = WmiUtil.selectObjectsFrom(null, "Win32_LogicalDisk", FS_PROPERTIES, null, FS_TYPES);
for (int i = 0; i < drives.get(NAME_PROPERTY).size(); i++) {
free = (Long) drives.get(FREESPACE_PROPERTY).get(i);
total = (Long) drives.get(SIZE_PROPERTY).get(i);
String description = (String) drives.get(DESCRIPTION_PROPERTY).get(i);
String name = (String) drives.get(NAME_PROPERTY).get(i);
long type = (Long) drives.get(DRIVE_TYPE_PROPERTY).get(i);
if (type != 4) {
char[] chrVolume = new char[BUFSIZE];
Kernel32.INSTANCE.GetVolumeNameForVolumeMountPoint(name + "\\", chrVolume, BUFSIZE);
volume = new String(chrVolume).trim();
} else {
volume = (String) drives.get(PROVIDER_NAME_PROPERTY).get(i);
String[] split = volume.split("\\\\");
if (split.length > 1 && split[split.length - 1].length() > 0) {
description = split[split.length - 1];
}
}
fs.add(new OSFileStore(String.format("%s (%s)", description, name), volume, name + "\\", getDriveType(name),
(String) drives.get(FILESYSTEM_PROPERTY).get(i), "", free, total));
}
return fs;
}
|
#vulnerable code
private List<OSFileStore> getWmiVolumes() {
Map<String, List<String>> drives;
List<OSFileStore> fs;
String volume;
long free;
long total;
fs = new ArrayList<>();
drives = WmiUtil.selectStringsFrom(null, "Win32_LogicalDisk",
"Name,Description,ProviderName,FileSystem,Freespace,Size", null);
for (int i = 0; i < drives.get("Name").size(); i++) {
free = ParseUtil.parseLongOrDefault(drives.get("Freespace").get(i), 0L);
total = ParseUtil.parseLongOrDefault(drives.get("Size").get(i), 0L);
String description = drives.get("Description").get(i);
long type = WmiUtil.selectUint32From(null, "Win32_LogicalDisk", "DriveType",
"WHERE Name = '" + drives.get("Name").get(i) + "'");
if (type != 4) {
char[] chrVolume = new char[BUFSIZE];
Kernel32.INSTANCE.GetVolumeNameForVolumeMountPoint(drives.get("Name").get(i) + "\\", chrVolume,
BUFSIZE);
volume = new String(chrVolume).trim();
} else {
volume = drives.get("ProviderName").get(i);
String[] split = volume.split("\\\\");
if (split.length > 1 && split[split.length - 1].length() > 0) {
description = split[split.length - 1];
}
}
fs.add(new OSFileStore(String.format("%s (%s)", description, drives.get("Name").get(i)), volume,
drives.get("Name").get(i) + "\\", getDriveType(drives.get("Name").get(i)),
drives.get("FileSystem").get(i), "", free, total));
}
return fs;
}
#location 15
#vulnerability type NULL_DEREFERENCE
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
@Test
public void testGetChildProcesses() {
// Testing child processes is tricky because we don't really know a
// priori what processes might have children, and if we do test the full
// list vs. individual processes, we run into a race condition where
// child processes can start or stop before we measure a second time. So
// we can't really test for one-to-one correspondence of child process
// lists.
//
// We can expect code logic failures to occur all/most of the time for
// categories of processes, however, and allow occasional differences
// due to race conditions. So we will test three categories of
// processes: Those with 0 children, those with exactly 1 child process,
// and those with multiple child processes. On the second poll, we
// expect at least half of those categories to still be in the same
// category.
//
SystemInfo si = new SystemInfo();
OperatingSystem os = si.getOperatingSystem();
OSProcess[] processes = os.getProcesses(0, null);
Set<Integer> zeroChildSet = new HashSet<>();
Set<Integer> oneChildSet = new HashSet<>();
Set<Integer> manyChildSet = new HashSet<>();
// Initialize all processes with no children
for (OSProcess p : processes) {
zeroChildSet.add(p.getProcessID());
}
// Move parents with 1 or more children to other set
for (OSProcess p : processes) {
if (zeroChildSet.contains(p.getParentProcessID())) {
// Zero to One
zeroChildSet.remove(p.getParentProcessID());
oneChildSet.add(p.getParentProcessID());
} else if (oneChildSet.contains(p.getParentProcessID())) {
// One to many
oneChildSet.remove(p.getParentProcessID());
manyChildSet.add(p.getParentProcessID());
}
}
// Now test that majority of each set is in same category
int matched = 0;
int total = 0;
for (Integer i : zeroChildSet) {
if (os.getChildProcesses(i, 0, null).length == 0) {
matched++;
}
// Quit if enough to test
if (++total > 9) {
break;
}
}
if (total > 4) {
assertTrue("Most processes with no children should not suddenly have them.", matched > total / 2);
}
matched = 0;
total = 0;
for (Integer i : oneChildSet) {
if (os.getChildProcesses(i, 0, null).length == 1) {
matched++;
}
// Quit if enough to test
if (++total > 9) {
break;
}
}
if (total > 4) {
assertTrue("Most processes with one child should not suddenly have zero or more than one.",
matched > total / 2);
}
matched = 0;
total = 0;
for (Integer i : manyChildSet) {
if (os.getChildProcesses(i, 0, null).length > 1) {
matched++;
}
// Quit if enough to test
if (++total > 9) {
break;
}
}
if (total > 4) {
assertTrue("Most processes with more than one child should not suddenly have one or less.",
matched > total / 2);
}
}
|
#vulnerable code
@Test
public void testGetChildProcesses() {
// Get list of PIDS
SystemInfo si = new SystemInfo();
OperatingSystem os = si.getOperatingSystem();
OSProcess[] processes = os.getProcesses(0, null);
Map<Integer, Integer> childMap = new HashMap<>();
// First iteration to set all 0's
for (OSProcess p : processes) {
childMap.put(p.getProcessID(), 0);
childMap.put(p.getParentProcessID(), 0);
}
// Second iteration to count children
for (OSProcess p : processes) {
childMap.put(p.getParentProcessID(), childMap.get(p.getParentProcessID()) + 1);
}
// Find a PID with 0, 1, and N>1 children
int zeroPid = -1;
int onePid = -1;
int nPid = -1;
int nNum = 0;
int mPid = -1;
int mNum = 0;
for (Integer i : childMap.keySet()) {
if (zeroPid < 0 && childMap.get(i) == 0) {
zeroPid = i;
} else if (onePid < 0 && childMap.get(i) == 1) {
onePid = i;
} else if (nPid < 0 && childMap.get(i) > 1) {
// nPid is probably PID=1 with all PIDs with no other parent
nPid = i;
nNum = childMap.get(i);
} else if (mPid < 0 && childMap.get(i) > 1) {
mPid = i;
mNum = childMap.get(i);
}
if (zeroPid >= 0 && onePid >= 0 && nPid >= 0 && mPid >= 0) {
break;
}
}
if (zeroPid >= 0) {
assertEquals(0, os.getChildProcesses(zeroPid, 0, null).length);
}
if (SystemInfo.getCurrentPlatformEnum() != PlatformEnum.SOLARIS) {
// Due to race condition, a process may terminate before we count
// its children. Play the odds.
// At least one of these tests should work.
if (onePid >= 0 && nPid >= 0 && mPid >= 0) {
assertTrue(os.getChildProcesses(onePid, 0, null).length == 1
|| os.getChildProcesses(nPid, 0, null).length == nNum
|| os.getChildProcesses(mPid, 0, null).length == mNum);
}
}
}
#location 25
#vulnerability type NULL_DEREFERENCE
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
@Override
public double getCpuTemperature() {
// Initialize
double tempC = 0d;
// If Open Hardware Monitor identifier is set, we couldn't get through
// normal WMI, and got ID from OHM at least once so go directly to OHM
if (this.tempIdentifierStr != null) {
double[] vals = wmiGetValuesForKeys("/namespace:\\\\root\\OpenHardwareMonitor PATH Sensor",
this.tempIdentifierStr, "Temperature", "Parent,SensorType,Value");
if (vals.length > 0) {
double sum = 0;
for (double val : vals) {
sum += val;
}
tempC = sum / vals.length;
}
return tempC;
}
// This branch is used the first time and all subsequent times if
// successful (tempIdenifierStr == null)
// Try to get value using initial or updated successful values
int tempK = 0;
if (this.wmiTempPath == null) {
this.wmiTempPath = "Temperature";
this.wmiTempProperty = "CurrentReading";
tempK = wmiGetValue(this.wmiTempPath, this.wmiTempProperty);
if (tempK < 0) {
this.wmiTempPath = "/namespace:\\\\root\\cimv2 PATH Win32_TemperatureProbe";
tempK = wmiGetValue(this.wmiTempPath, this.wmiTempProperty);
}
if (tempK < 0) {
this.wmiTempPath = "/namespace:\\\\root\\wmi PATH MSAcpi_ThermalZoneTemperature";
this.wmiTempProperty = "CurrentTemperature";
tempK = wmiGetValue(this.wmiTempPath, this.wmiTempProperty);
}
} else {
// We've successfully read a previous time, or failed both here and
// with OHM
tempK = wmiGetValue(this.wmiTempPath, this.wmiTempProperty);
}
// Convert K to C and return result
if (tempK > 0) {
tempC = (tempK / 10d) - 273.15;
}
if (tempC <= 0d) {
// Unable to get temperature via WMI. Future attempts will be
// attempted via Open Hardware Monitor WMI if successful
String[] cpuIdentifiers = wmiGetStrValuesForKey("/namespace:\\\\root\\OpenHardwareMonitor PATH Hardware",
"CPU", "HardwareType,Identifier");
if (cpuIdentifiers.length > 0) {
this.tempIdentifierStr = cpuIdentifiers[0];
}
// If not null, recurse and get value via OHM
if (this.tempIdentifierStr != null) {
return getCpuTemperature();
}
}
return tempC;
}
|
#vulnerable code
@Override
public double getCpuTemperature() {
ArrayList<String> hwInfo = ExecutingCommand.runNative("wmic Temperature get CurrentReading");
for (String checkLine : hwInfo) {
if (checkLine.length() == 0 || checkLine.toLowerCase().contains("currentreading")) {
continue;
} else {
// If successful this line is in tenths of degrees Kelvin
try {
int tempK = Integer.parseInt(checkLine.trim());
if (tempK > 0) {
return (tempK - 2715) / 10d;
}
} catch (NumberFormatException e) {
// If we failed to parse, give up
}
break;
}
}
// Above query failed, try something else
hwInfo = ExecutingCommand
.runNative("wmic /namespace:\\\\root\\wmi PATH MSAcpi_ThermalZoneTemperature get CurrentTemperature");
for (String checkLine : hwInfo) {
if (checkLine.length() == 0 || checkLine.toLowerCase().contains("currenttemperature")) {
continue;
} else {
// If successful this line is in tenths of degrees Kelvin
try {
int tempK = Integer.parseInt(checkLine.trim());
if (tempK > 0) {
return (tempK - 2715) / 10d;
}
} catch (NumberFormatException e) {
// If we failed to parse, give up
}
break;
}
}
// Above query failed, try something else
hwInfo = ExecutingCommand
.runNative("wmic /namespace:\\\\root\\cimv2 PATH Win32_TemperatureProbe get CurrentReading");
for (String checkLine : hwInfo) {
if (checkLine.length() == 0 || checkLine.toLowerCase().contains("currentreading")) {
continue;
} else {
// If successful this line is in tenths of degrees Kelvin
try {
int tempK = Integer.parseInt(checkLine.trim());
if (tempK > 0) {
return (tempK - 2715) / 10d;
}
} catch (NumberFormatException e) {
// If we failed to parse, give up
}
break;
}
}
return 0d;
}
#location 4
#vulnerability type NULL_DEREFERENCE
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
@Override
protected void updateSwap() {
updateMeminfo();
this.swapUsed = PdhUtil.queryCounter(pdhPagingPercentUsageCounter) * this.pageSize;
}
|
#vulnerable code
@Override
protected void updateSwap() {
updateMeminfo();
Map<String, List<Long>> usage = WmiUtil.selectUint32sFrom(null, "Win32_PerfRawData_PerfOS_PagingFile",
"PercentUsage,PercentUsage_Base", "WHERE Name=\"_Total\"");
if (!usage.get("PercentUsage").isEmpty()) {
this.swapUsed = this.swapTotal * usage.get("PercentUsage").get(0) / usage.get("PercentUsage_Base").get(0);
}
}
#location 7
#vulnerability type NULL_DEREFERENCE
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
public static UsbDevice[] getUsbDevices() {
// Reusable buffer for getting IO name strings
Pointer buffer = new Memory(128); // io_name_t is char[128]
// Build a list of devices with no parent; these will be the roots
List<Long> usbControllers = new ArrayList<>();
// Empty out maps
nameMap.clear();
vendorMap.clear();
serialMap.clear();
hubMap.clear();
// Iterate over USB Controllers. All devices are children of one of
// these controllers in the "IOService" plane
IntByReference iter = new IntByReference();
IOKitUtil.getMatchingServices("IOUSBController", iter);
int device = IOKit.INSTANCE.IOIteratorNext(iter.getValue());
while (device != 0) {
// Unique global identifier for this device
LongByReference id = new LongByReference();
IOKit.INSTANCE.IORegistryEntryGetRegistryEntryID(device, id);
usbControllers.add(id.getValue());
// Get device name and store in map
IOKit.INSTANCE.IORegistryEntryGetName(device, buffer);
nameMap.put(id.getValue(), buffer.getString(0));
// Controllers don't have vendor and serial so ignore at this level
// Now iterate the children of this device in the "IOService" plane.
// If devices have a parent, link to that parent, otherwise link to
// the controller as parent
IntByReference childIter = new IntByReference();
IOKit.INSTANCE.IORegistryEntryGetChildIterator(device, "IOService", childIter);
int childDevice = IOKit.INSTANCE.IOIteratorNext(childIter.getValue());
while (childDevice != 0) {
// Unique global identifier for this device
LongByReference childId = new LongByReference();
IOKit.INSTANCE.IORegistryEntryGetRegistryEntryID(childDevice, childId);
// Get this device's parent in the "IOUSB" plane
IntByReference parent = new IntByReference();
IOKit.INSTANCE.IORegistryEntryGetParentEntry(childDevice, "IOUSB", parent);
// If parent is named "Root" ignore that id and use the
// controller's id
LongByReference parentId = id;
IOKit.INSTANCE.IORegistryEntryGetName(parent.getValue(), buffer);
if (!buffer.getString(0).equals("Root")) {
// Unique global identifier for the parent
parentId = new LongByReference();
IOKit.INSTANCE.IORegistryEntryGetRegistryEntryID(parent.getValue(), parentId);
}
// Store parent in map
if (!hubMap.containsKey(parentId.getValue())) {
hubMap.put(parentId.getValue(), new ArrayList<Long>());
}
hubMap.get(parentId.getValue()).add(childId.getValue());
// Get device name and store in map
IOKit.INSTANCE.IORegistryEntryGetName(childDevice, buffer);
nameMap.put(childId.getValue(), buffer.getString(0));
// Get vendor and store in map
CFTypeRef vendorRef = IOKit.INSTANCE.IORegistryEntryCreateCFProperty(childDevice, cfVendor,
CfUtil.ALLOCATOR, 0);
if (vendorRef != null && vendorRef.getPointer() != null) {
vendorMap.put(childId.getValue(), CfUtil.cfPointerToString(vendorRef.getPointer()));
}
CfUtil.release(vendorRef);
// Get serial and store in map
CFTypeRef serialRef = IOKit.INSTANCE.IORegistryEntryCreateCFProperty(childDevice, cfSerial,
CfUtil.ALLOCATOR, 0);
if (serialRef != null && serialRef.getPointer() != null) {
serialMap.put(childId.getValue(), CfUtil.cfPointerToString(serialRef.getPointer()));
}
CfUtil.release(serialRef);
IOKit.INSTANCE.IOObjectRelease(childDevice);
childDevice = IOKit.INSTANCE.IOIteratorNext(childIter.getValue());
}
IOKit.INSTANCE.IOObjectRelease(childIter.getValue());
IOKit.INSTANCE.IOObjectRelease(device);
device = IOKit.INSTANCE.IOIteratorNext(iter.getValue());
}
IOKit.INSTANCE.IOObjectRelease(iter.getValue());
// Build tree and return
List<UsbDevice> controllerDevices = new ArrayList<UsbDevice>();
for (Long controller : usbControllers) {
controllerDevices.add(getDeviceAndChildren(controller));
}
return controllerDevices.toArray(new UsbDevice[controllerDevices.size()]);
}
|
#vulnerable code
public static UsbDevice[] getUsbDevices() {
// Get heirarchical list of USB devices
List<String> xml = ExecutingCommand.runNative("system_profiler SPUSBDataType -xml");
// Look for <key>_items</key> which prcedes <array> ... </array>
// Each pair of <dict> ... </dict> following is a USB device/hub
List<String> items = new ArrayList<>();
boolean copy = false;
int indent = 0;
for (String s : xml) {
s = s.trim();
// Read until <key>_items</key>
if (!copy && s.equals("<key>_items</key>")) {
copy = true;
continue;
}
// If we've fond items indent with each <array> tag and copy over
// everything with indent > 0.
if (copy) {
if (s.equals("</array>")) {
if (--indent == 0) {
copy = false;
continue;
}
}
if (indent > 0) {
items.add(s);
}
if (s.equals("<array>")) {
indent++;
}
}
}
// Items now contains 0 or more sets of <dict>...</dict>
return getUsbDevices(items);
}
#location 9
#vulnerability type NULL_DEREFERENCE
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
public static ArrayList<String> runNative(String[] cmdToRunWithArgs) {
Process p = null;
try {
p = Runtime.getRuntime().exec(cmdToRunWithArgs);
} catch (IOException e) {
LOG.trace("", e);
return null;
}
ArrayList<String> sa = new ArrayList<>();
try (BufferedReader reader = new BufferedReader(new InputStreamReader(p.getInputStream()))) {
String line;
while ((line = reader.readLine()) != null) {
sa.add(line);
}
p.waitFor();
} catch (InterruptedException e) {
LOG.trace("", e);
return null;
} catch (IOException e) {
LOG.trace("", e);
return null;
}
return sa;
}
|
#vulnerable code
public static ArrayList<String> runNative(String[] cmdToRunWithArgs) {
Process p = null;
try {
p = Runtime.getRuntime().exec(cmdToRunWithArgs);
} catch (IOException e) {
LOG.trace("", e);
return null;
}
BufferedReader reader = new BufferedReader(new InputStreamReader(p.getInputStream()));
String line = "";
ArrayList<String> sa = new ArrayList<>();
try {
while ((line = reader.readLine()) != null) {
sa.add(line);
}
p.waitFor();
} catch (InterruptedException e) {
LOG.trace("", e);
return null;
} catch (IOException e) {
LOG.trace("", e);
return null;
}
return sa;
}
#location 18
#vulnerability type RESOURCE_LEAK
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
@Override
public boolean updateAttributes() {
try {
File ifDir = new File(String.format("/sys/class/net/%s/statistics", getName()));
if (!ifDir.isDirectory()) {
return false;
}
} catch (SecurityException e) {
return false;
}
String ifTypePath = String.format("/sys/class/net/%s/type", getName());
String carrierPath = String.format("/sys/class/net/%s/carrier", getName());
String txBytesPath = String.format("/sys/class/net/%s/statistics/tx_bytes", getName());
String rxBytesPath = String.format("/sys/class/net/%s/statistics/rx_bytes", getName());
String txPacketsPath = String.format("/sys/class/net/%s/statistics/tx_packets", getName());
String rxPacketsPath = String.format("/sys/class/net/%s/statistics/rx_packets", getName());
String txErrorsPath = String.format("/sys/class/net/%s/statistics/tx_errors", getName());
String rxErrorsPath = String.format("/sys/class/net/%s/statistics/rx_errors", getName());
String collisionsPath = String.format("/sys/class/net/%s/statistics/collisions", getName());
String rxDropsPath = String.format("/sys/class/net/%s/statistics/rx_dropped", getName());
String ifSpeed = String.format("/sys/class/net/%s/speed", getName());
this.timeStamp = System.currentTimeMillis();
this.ifType = FileUtil.getIntFromFile(ifTypePath);
this.connectorPresent = FileUtil.getIntFromFile(carrierPath) > 0;
this.bytesSent = FileUtil.getUnsignedLongFromFile(txBytesPath);
this.bytesRecv = FileUtil.getUnsignedLongFromFile(rxBytesPath);
this.packetsSent = FileUtil.getUnsignedLongFromFile(txPacketsPath);
this.packetsRecv = FileUtil.getUnsignedLongFromFile(rxPacketsPath);
this.outErrors = FileUtil.getUnsignedLongFromFile(txErrorsPath);
this.inErrors = FileUtil.getUnsignedLongFromFile(rxErrorsPath);
this.collisions = FileUtil.getUnsignedLongFromFile(collisionsPath);
this.inDrops = FileUtil.getUnsignedLongFromFile(rxDropsPath);
long speedMiB = FileUtil.getUnsignedLongFromFile(ifSpeed);
// speed may be -1 from file.
this.speed = speedMiB < 0 ? 0 : speedMiB << 20;
return true;
}
|
#vulnerable code
@Override
public boolean updateAttributes() {
try {
File ifDir = new File(String.format("/sys/class/net/%s/statistics", getName()));
if (!ifDir.isDirectory()) {
return false;
}
} catch (SecurityException e) {
return false;
}
String ifTypePath = String.format("/sys/class/net/%s/type", getName());
String carrierPath = String.format("/sys/class/net/%s/carrier", getName());
String txBytesPath = String.format("/sys/class/net/%s/statistics/tx_bytes", getName());
String rxBytesPath = String.format("/sys/class/net/%s/statistics/rx_bytes", getName());
String txPacketsPath = String.format("/sys/class/net/%s/statistics/tx_packets", getName());
String rxPacketsPath = String.format("/sys/class/net/%s/statistics/rx_packets", getName());
String txErrorsPath = String.format("/sys/class/net/%s/statistics/tx_errors", getName());
String rxErrorsPath = String.format("/sys/class/net/%s/statistics/rx_errors", getName());
String collisionsPath = String.format("/sys/class/net/%s/statistics/collisions", getName());
String rxDropsPath = String.format("/sys/class/net/%s/statistics/rx_dropped", getName());
String ifSpeed = String.format("/sys/class/net/%s/speed", getName());
this.timeStamp = System.currentTimeMillis();
this.ifType = FileUtil.getIntFromFile(ifTypePath);
this.connectorPresent = FileUtil.getIntFromFile(carrierPath) > 0;
this.bytesSent = FileUtil.getUnsignedLongFromFile(txBytesPath);
this.bytesRecv = FileUtil.getUnsignedLongFromFile(rxBytesPath);
this.packetsSent = FileUtil.getUnsignedLongFromFile(txPacketsPath);
this.packetsRecv = FileUtil.getUnsignedLongFromFile(rxPacketsPath);
this.outErrors = FileUtil.getUnsignedLongFromFile(txErrorsPath);
this.inErrors = FileUtil.getUnsignedLongFromFile(rxErrorsPath);
this.collisions = FileUtil.getUnsignedLongFromFile(collisionsPath);
this.inDrops = FileUtil.getUnsignedLongFromFile(rxDropsPath);
// speed may be negative from file. Convert to MiB.
this.speed = FileUtil.getUnsignedLongFromFile(ifSpeed);
this.speed = this.speed < 0 ? 0 : this.speed << 20;
return true;
}
#location 36
#vulnerability type THREAD_SAFETY_VIOLATION
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
@Override
public OSService[] getServices() {
// Get running services
List<OSService> services = new ArrayList<>();
Set<String> running = new HashSet<>();
for (OSProcess p : getChildProcesses(1, 0, ProcessSort.PID)) {
OSService s = new OSService(p.getName(), p.getProcessID(), RUNNING);
services.add(s);
running.add(p.getName());
}
// Get Directories for stopped services
File dir = new File("/etc/rc.d");
File[] listFiles;
if (dir.exists() && dir.isDirectory() && (listFiles = dir.listFiles()) != null) {
for (File f : listFiles) {
String name = f.getName();
if (!running.contains(name)) {
OSService s = new OSService(name, 0, STOPPED);
services.add(s);
}
}
} else {
LOG.error("Directory: /etc/init does not exist");
}
return services.toArray(new OSService[0]);
}
|
#vulnerable code
@Override
public OSService[] getServices() {
// Get running services
List<OSService> services = new ArrayList<>();
Set<String> running = new HashSet<>();
for (OSProcess p : getChildProcesses(1, 0, ProcessSort.PID)) {
OSService s = new OSService(p.getName(), p.getProcessID(), RUNNING);
services.add(s);
running.add(p.getName());
}
// Get Directories for stopped services
File dir = new File("/etc/rc.d");
if (dir.exists() && dir.isDirectory()) {
for (File f : dir.listFiles()) {
String name = f.getName();
if (!running.contains(name)) {
OSService s = new OSService(name, 0, STOPPED);
services.add(s);
}
}
} else {
LOG.error("Directory: /etc/init does not exist");
}
return services.toArray(new OSService[0]);
}
#location 14
#vulnerability type NULL_DEREFERENCE
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
private List<OSFileStore> getWmiVolumes() {
Map<String, List<Object>> drives;
List<OSFileStore> fs;
String volume;
long free;
long total;
fs = new ArrayList<>();
drives = WmiUtil.selectObjectsFrom(null, "Win32_LogicalDisk", FS_PROPERTIES, null, FS_TYPES);
for (int i = 0; i < drives.get(NAME_PROPERTY).size(); i++) {
free = (Long) drives.get(FREESPACE_PROPERTY).get(i);
total = (Long) drives.get(SIZE_PROPERTY).get(i);
String description = (String) drives.get(DESCRIPTION_PROPERTY).get(i);
String name = (String) drives.get(NAME_PROPERTY).get(i);
long type = (Long) drives.get(DRIVE_TYPE_PROPERTY).get(i);
if (type != 4) {
char[] chrVolume = new char[BUFSIZE];
Kernel32.INSTANCE.GetVolumeNameForVolumeMountPoint(name + "\\", chrVolume, BUFSIZE);
volume = new String(chrVolume).trim();
} else {
volume = (String) drives.get(PROVIDER_NAME_PROPERTY).get(i);
String[] split = volume.split("\\\\");
if (split.length > 1 && split[split.length - 1].length() > 0) {
description = split[split.length - 1];
}
}
fs.add(new OSFileStore(String.format("%s (%s)", description, name), volume, name + "\\", getDriveType(name),
(String) drives.get(FILESYSTEM_PROPERTY).get(i), "", free, total));
}
return fs;
}
|
#vulnerable code
private List<OSFileStore> getWmiVolumes() {
Map<String, List<String>> drives;
List<OSFileStore> fs;
String volume;
long free;
long total;
fs = new ArrayList<>();
drives = WmiUtil.selectStringsFrom(null, "Win32_LogicalDisk",
"Name,Description,ProviderName,FileSystem,Freespace,Size", null);
for (int i = 0; i < drives.get("Name").size(); i++) {
free = ParseUtil.parseLongOrDefault(drives.get("Freespace").get(i), 0L);
total = ParseUtil.parseLongOrDefault(drives.get("Size").get(i), 0L);
String description = drives.get("Description").get(i);
long type = WmiUtil.selectUint32From(null, "Win32_LogicalDisk", "DriveType",
"WHERE Name = '" + drives.get("Name").get(i) + "'");
if (type != 4) {
char[] chrVolume = new char[BUFSIZE];
Kernel32.INSTANCE.GetVolumeNameForVolumeMountPoint(drives.get("Name").get(i) + "\\", chrVolume,
BUFSIZE);
volume = new String(chrVolume).trim();
} else {
volume = drives.get("ProviderName").get(i);
String[] split = volume.split("\\\\");
if (split.length > 1 && split[split.length - 1].length() > 0) {
description = split[split.length - 1];
}
}
fs.add(new OSFileStore(String.format("%s (%s)", description, drives.get("Name").get(i)), volume,
drives.get("Name").get(i) + "\\", getDriveType(drives.get("Name").get(i)),
drives.get("FileSystem").get(i), "", free, total));
}
return fs;
}
#location 16
#vulnerability type NULL_DEREFERENCE
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
public static <T extends Enum<T>> Pair<List<String>, Map<T, List<Long>>> queryInstancesAndValues(
Class<T> propertyEnum, String perfObject, String perfWmiClass) {
// Check without locking for performance
if (!failedQueryCache.contains(perfObject)) {
failedQueryCacheLock.lock();
try {
// Double check lock
if (!failedQueryCache.contains(perfObject)) {
Pair<List<String>, Map<T, List<Long>>> instancesAndValuesMap = queryInstancesAndValuesFromPDH(
propertyEnum, perfObject);
if (!instancesAndValuesMap.getA().isEmpty()) {
return instancesAndValuesMap;
}
// If we are here, query failed
LOG.warn("Disabling further attempts to query {}.", perfObject);
failedQueryCache.add(perfObject);
}
} finally {
failedQueryCacheLock.unlock();
}
}
return queryInstancesAndValuesFromWMI(propertyEnum, perfWmiClass);
}
|
#vulnerable code
public static <T extends Enum<T>> Pair<List<String>, Map<T, List<Long>>> queryInstancesAndValues(
Class<T> propertyEnum, String perfObject, String perfWmiClass) {
Pair<List<String>, Map<T, List<Long>>> instancesAndValuesMap = queryInstancesAndValuesFromPDH(propertyEnum,
perfObject);
if (instancesAndValuesMap.getA().isEmpty()) {
return queryInstancesAndValuesFromWMI(propertyEnum, perfWmiClass);
}
return instancesAndValuesMap;
}
#location 3
#vulnerability type INTERFACE_NOT_THREAD_SAFE
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
public static UsbDevice[] getUsbDevices() {
// Start by collecting information for all PNP devices. While in theory
// these could be individually queried with a WHERE clause, grabbing
// them all up front incurs minimal memory overhead in exchange for
// faster access later
// Clear maps
nameMap.clear();
vendorMap.clear();
serialMap.clear();
// Query Win32_PnPEntity to populate the maps
Map<String, List<String>> usbMap = WmiUtil.selectStringsFrom(null, "Win32_PnPEntity",
"Name,Manufacturer,PnPDeviceID", null);
for (int i = 0; i < usbMap.get("Name").size(); i++) {
String pnpDeviceID = usbMap.get("PnPDeviceID").get(i);
nameMap.put(pnpDeviceID, usbMap.get("Name").get(i));
if (usbMap.get("Manufacturer").get(i).length() > 0) {
vendorMap.put(pnpDeviceID, usbMap.get("Manufacturer").get(i));
}
}
// Get serial # for disk drives or other physical media
usbMap = WmiUtil.selectStringsFrom(null, "Win32_DiskDrive", "PNPDeviceID,SerialNumber", null);
for (int i = 0; i < usbMap.get("PNPDeviceID").size(); i++) {
serialMap.put(usbMap.get("PNPDeviceID").get(i),
ParseUtil.hexStringToString(usbMap.get("PNPDeviceID").get(i)));
}
usbMap = WmiUtil.selectStringsFrom(null, "Win32_PhysicalMedia", "PNPDeviceID,SerialNumber", null);
for (int i = 0; i < usbMap.get("PNPDeviceID").size(); i++) {
serialMap.put(usbMap.get("PNPDeviceID").get(i),
ParseUtil.hexStringToString(usbMap.get("PNPDeviceID").get(i)));
}
// Build the device tree. Start with the USB Controllers
// and recurse downward to devices as needed
usbMap = WmiUtil.selectStringsFrom(null, "Win32_USBController", "PNPDeviceID", null);
List<UsbDevice> controllerDevices = new ArrayList<UsbDevice>();
for (String controllerDeviceId : usbMap.get("PNPDeviceID")) {
putChildrenInDeviceTree(controllerDeviceId, 0);
controllerDevices.add(getDeviceAndChildren(controllerDeviceId));
}
return controllerDevices.toArray(new UsbDevice[controllerDevices.size()]);
}
|
#vulnerable code
public static UsbDevice[] getUsbDevices() {
// Start by collecting information for all PNP devices. While in theory
// these could be individually queried with a WHERE clause, grabbing
// them all up front incurs minimal memory overhead in exchange for
// faster access later
// Clear maps
nameMap.clear();
vendorMap.clear();
serialMap.clear();
// Query Win32_PnPEntity to populate the maps
Map<String, List<String>> usbMap = WmiUtil.selectStringsFrom(null, "Win32_PnPEntity",
"Name,Manufacturer,PnPDeviceID", null);
for (int i = 0; i < usbMap.get("Name").size(); i++) {
String pnpDeviceID = usbMap.get("PnPDeviceID").get(i);
nameMap.put(pnpDeviceID, usbMap.get("Name").get(i));
if (usbMap.get("Manufacturer").get(i).length() > 0) {
vendorMap.put(pnpDeviceID, usbMap.get("Manufacturer").get(i));
}
String serialNumber = "";
// PNPDeviceID: USB\VID_203A&PID_FFF9&MI_00\6&18C4CF61&0&0000
// Split by \ to get bus type (USB), VendorID/ProductID, other info
// As a temporary hack for a serial number, use last \-split field
// using 2nd &-split field if 4 fields
String[] idSplit = pnpDeviceID.split("\\\\");
if (idSplit.length > 2) {
idSplit = idSplit[2].split("&");
if (idSplit.length > 3) {
serialNumber = idSplit[1];
}
}
if (serialNumber.length() > 0) {
serialMap.put(pnpDeviceID, serialNumber);
}
}
// Disk drives or other physical media have a better way of getting
// serial number. Grab these and overwrite the temporary serial number
// assigned above if necessary
usbMap = WmiUtil.selectStringsFrom(null, "Win32_DiskDrive", "PNPDeviceID,SerialNumber", null);
for (int i = 0; i < usbMap.get("PNPDeviceID").size(); i++) {
serialMap.put(usbMap.get("PNPDeviceID").get(i),
ParseUtil.hexStringToString(usbMap.get("PNPDeviceID").get(i)));
}
usbMap = WmiUtil.selectStringsFrom(null, "Win32_PhysicalMedia", "PNPDeviceID,SerialNumber", null);
for (int i = 0; i < usbMap.get("PNPDeviceID").size(); i++) {
serialMap.put(usbMap.get("PNPDeviceID").get(i),
ParseUtil.hexStringToString(usbMap.get("PNPDeviceID").get(i)));
}
// Some USB Devices are hubs to which other devices connect. Knowing
// which ones are hubs will help later when walking the device tree
usbMap = WmiUtil.selectStringsFrom(null, "Win32_USBHub", "PNPDeviceID", null);
List<String> usbHubs = usbMap.get("PNPDeviceID");
// Now build the hub map linking USB devices with their parent hub.
// At the top of the device tree are USB Controllers. All USB hubs and
// devices descend from these. Because this query returns pointers it's
// just not practical to try to query via COM so we use a command line
// in order to get USB devices in a text format
ArrayList<String> links = ExecutingCommand
.runNative("wmic path Win32_USBControllerDevice GET Antecedent,Dependent");
// This iteration actually walks the device tree in order so while the
// antecedent of all USB devices is its controller, we know that if a
// device is not a hub that the last hub listed is its parent
// Devices with PNPDeviceID containing "ROOTHUB" are special and will be
// parents of the next item(s)
// This won't id chained hubs (other than the root hub) but is a quick
// hack rather than walking the entire device tree using the SetupDI API
// and good enough since exactly how a USB device is connected is
// theoretically transparent to the user
hubMap.clear();
String currentHub = null;
String rootHub = null;
for (String s : links) {
String[] split = s.split("\\s+");
if (split.length < 2) {
continue;
}
String antecedent = getId(split[0]);
String dependent = getId(split[1]);
// Ensure initial defaults are sane if something goes wrong
if (currentHub == null || rootHub == null) {
currentHub = antecedent;
rootHub = antecedent;
}
String parent;
if (dependent.contains("ROOT_HUB")) {
// This is a root hub, assign controller as parent;
parent = antecedent;
rootHub = dependent;
currentHub = dependent;
} else if (usbHubs.contains(dependent)) {
// This is a hub, assign parent as root hub
if (rootHub == null) {
rootHub = antecedent;
}
parent = rootHub;
currentHub = dependent;
} else {
// This is not a hub, assign parent as previous hub
if (currentHub == null) {
currentHub = antecedent;
}
parent = currentHub;
}
// Finally add the parent/child linkage to the map
if (!hubMap.containsKey(parent)) {
hubMap.put(parent, new ArrayList<String>());
}
hubMap.get(parent).add(dependent);
}
// Finally we simply get the device IDs of the USB Controllers. These
// will recurse downward to devices as needed
usbMap = WmiUtil.selectStringsFrom(null, "Win32_USBController", "PNPDeviceID", null);
List<UsbDevice> controllerDevices = new ArrayList<UsbDevice>();
for (String controllerDeviceID : usbMap.get("PNPDeviceID")) {
controllerDevices.add(getDeviceAndChildren(controllerDeviceID));
}
return controllerDevices.toArray(new UsbDevice[controllerDevices.size()]);
}
#location 76
#vulnerability type NULL_DEREFERENCE
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
@Override
public HWDiskStore[] getDisks() {
List<HWDiskStore> result;
result = new ArrayList<>();
readMap.clear();
writeMap.clear();
populateReadWriteMaps();
Map<String, List<Object>> vals = WmiUtil.selectObjectsFrom(null, "Win32_DiskDrive",
"Name,Manufacturer,Model,SerialNumber,Size,Index", null, DRIVE_TYPES);
for (int i = 0; i < vals.get("Name").size(); i++) {
HWDiskStore ds = new HWDiskStore();
ds.setName((String) vals.get("Name").get(i));
ds.setModel(String.format("%s %s", vals.get("Model").get(i), vals.get("Manufacturer").get(i)).trim());
// Most vendors store serial # as a hex string; convert
ds.setSerial(ParseUtil.hexStringToString((String) vals.get("SerialNumber").get(i)));
String index = vals.get("Index").get(i).toString();
if (readMap.containsKey(index)) {
ds.setReads(readMap.get(index));
}
if (writeMap.containsKey(index)) {
ds.setWrites(writeMap.get(index));
}
// If successful this line is the desired value
try {
ds.setSize(Long.parseLong((String) vals.get("Size").get(i)));
} catch (NumberFormatException e) {
// If we failed to parse, give up
// This is expected for an empty string on some drives
ds.setSize(0L);
}
result.add(ds);
}
return result.toArray(new HWDiskStore[result.size()]);
}
|
#vulnerable code
@Override
public HWDiskStore[] getDisks() {
List<HWDiskStore> result;
result = new ArrayList<>();
Map<String, List<String>> vals = WmiUtil.selectStringsFrom(null, "Win32_DiskDrive",
"Name,Manufacturer,Model,SerialNumber,Size", null);
for (int i = 0; i < vals.get("Name").size(); i++) {
HWDiskStore ds = new HWDiskStore();
ds.setName(vals.get("Name").get(i));
ds.setModel(String.format("%s %s", vals.get("Model").get(i), vals.get("Manufacturer").get(i)).trim());
// Most vendors store serial # as a hex string; convert
ds.setSerial(ParseUtil.hexStringToString(vals.get("SerialNumber").get(i)));
// If successful this line is the desired value
try {
ds.setSize(Long.parseLong(vals.get("Size").get(i)));
} catch (NumberFormatException e) {
// If we failed to parse, give up
// This is expected for an empty string on some drives
ds.setSize(0L);
}
result.add(ds);
}
return result.toArray(new HWDiskStore[result.size()]);
}
#location 13
#vulnerability type NULL_DEREFERENCE
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
public static ArrayList<String> runNative(String cmdToRun) {
Process p = null;
try {
p = Runtime.getRuntime().exec(cmdToRun);
p.waitFor();
} catch (IOException e) {
return null;
} catch (InterruptedException e) {
e.printStackTrace();
}
BufferedReader reader = new BufferedReader(new InputStreamReader(
p.getInputStream()));
String line = "";
ArrayList<String> sa = new ArrayList<String>();
try {
while ((line = reader.readLine()) != null) {
sa.add(line);
}
} catch (IOException e) {
return null;
}
return sa;
}
|
#vulnerable code
public static ArrayList<String> runNative(String cmdToRun) {
Process p = null;
try {
p = Runtime.getRuntime().exec(cmdToRun);
//p.waitFor();
} catch (IOException e) {
return null;
}
BufferedReader reader = new BufferedReader(new InputStreamReader(
p.getInputStream()));
String line = "";
ArrayList<String> sa = new ArrayList<String>();
try {
while ((line = reader.readLine()) != null) {
sa.add(line);
}
} catch (IOException e) {
return null;
}
p.destroy();
return sa;
}
#location 6
#vulnerability type RESOURCE_LEAK
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
@Test
public void find() {
final Optional<Integer> result = _.find(asList(1, 2, 3, 4, 5, 6),
new Predicate<Integer>() {
public Boolean apply(Integer item) {
return item % 2 == 0;
}
});
assertEquals("Optional.of(2)", result.toString());
}
|
#vulnerable code
@Test
public void find() {
final Integer result = _.find(asList(1, 2, 3, 4, 5, 6),
new Predicate<Integer>() {
public Boolean apply(Integer item) {
return item % 2 == 0;
}
});
assertEquals("2", result.toString());
}
#location 9
#vulnerability type NULL_DEREFERENCE
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
@Test
public void findWhere() {
class Book {
public final String title;
public final String author;
public final Integer year;
public Book(final String title, final String author, final Integer year) {
this.title = title;
this.author = author;
this.year = year;
}
public String toString() {
return "title: " + title + ", author: " + author + ", year: " + year;
}
};
List<Book> listOfPlays =
new ArrayList<Book>() {{
add(new Book("Cymbeline2", "Shakespeare", 1614));
add(new Book("Cymbeline", "Shakespeare", 1611));
add(new Book("The Tempest", "Shakespeare", 1611));
}};
assertEquals("title: Cymbeline, author: Shakespeare, year: 1611",
_.findWhere(listOfPlays, asList(
Tuple.<String, Object>create("author", "Shakespeare"),
Tuple.<String, Object>create("year", Integer.valueOf(1611)))).get().toString());
assertEquals("title: Cymbeline, author: Shakespeare, year: 1611",
_.findWhere(listOfPlays, asList(
Tuple.<String, Object>create("author", "Shakespeare"),
Tuple.<String, Object>create("author2", "Shakespeare"),
Tuple.<String, Object>create("year", Integer.valueOf(1611)))).get().toString());
}
|
#vulnerable code
@Test
public void findWhere() {
class Book {
public final String title;
public final String author;
public final Integer year;
public Book(final String title, final String author, final Integer year) {
this.title = title;
this.author = author;
this.year = year;
}
public String toString() {
return "title: " + title + ", author: " + author + ", year: " + year;
}
};
List<Book> listOfPlays =
new ArrayList<Book>() {{
add(new Book("Cymbeline2", "Shakespeare", 1614));
add(new Book("Cymbeline", "Shakespeare", 1611));
add(new Book("The Tempest", "Shakespeare", 1611));
}};
assertEquals("title: Cymbeline, author: Shakespeare, year: 1611",
_.findWhere(listOfPlays, asList(
Tuple.<String, Object>create("author", "Shakespeare"),
Tuple.<String, Object>create("year", Integer.valueOf(1611)))).toString());
assertEquals("title: Cymbeline, author: Shakespeare, year: 1611",
_.findWhere(listOfPlays, asList(
Tuple.<String, Object>create("author", "Shakespeare"),
Tuple.<String, Object>create("author2", "Shakespeare"),
Tuple.<String, Object>create("year", Integer.valueOf(1611)))).toString());
}
#location 25
#vulnerability type NULL_DEREFERENCE
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
@Test
public void singleOrNull() {
U<Integer> uWithMoreElement = new U<>(asList(1, 2, 3));
U<Integer> uWithOneElement = new U<>(singletonList(1));
final Integer result1 = U.singleOrNull(asList(1, 2, 3));
assertNull(result1);
final int result2 = U.singleOrNull(singletonList(1));
assertEquals(1, result2);
final Integer result3 = U.singleOrNull(new ArrayList<>());
assertNull(result3);
final Integer result4 = U.singleOrNull(asList(1, 2, 3), item -> item % 2 == 1);
assertNull(result4);
final int result5 = U.singleOrNull(asList(1, 2, 3), item -> item % 2 == 0);
assertEquals(2, result5);
final Integer result6 = U.singleOrNull(asList(1, 2, 3), item -> item == 5);
assertNull(result6);
final Integer result7 = uWithMoreElement.singleOrNull();
assertNull(result7);
final Integer result8 = uWithOneElement.singleOrNull();
assertEquals(result8, Integer.valueOf(1));
final Integer result9 = uWithMoreElement.singleOrNull(item -> item % 2 == 0);
assertEquals(result9, Integer.valueOf(2));
final Integer result10 = uWithMoreElement.singleOrNull(item -> item % 2 == 1);
assertNull(result10);
}
|
#vulnerable code
@Test
public void singleOrNull() {
U<Integer> uWithMoreElement = new U<>(asList(1, 2, 3));
U<Integer> uWithOneElement = new U<>(asList(1));
final Integer result1 = U.singleOrNull(asList(1, 2, 3));
assertNull(result1);
final int result2 = U.singleOrNull(asList(1));
assertEquals(1, result2);
final Integer result3 = U.singleOrNull(new ArrayList<>());
assertNull(result3);
final Integer result4 = U.singleOrNull(asList(1, 2, 3), item -> item % 2 == 1);
assertNull(result4);
final int result5 = U.singleOrNull(asList(1, 2, 3), item -> item % 2 == 0);
assertEquals(2, result5);
final Integer result6 = U.singleOrNull(asList(1, 2, 3), item -> item == 5);
assertNull(result6);
final Integer result7 = uWithMoreElement.singleOrNull();
assertNull(result7);
final Integer result8 = uWithOneElement.singleOrNull();
assertEquals(result8, Integer.valueOf(1));
final Integer result9 = uWithMoreElement.singleOrNull(item -> item % 2 == 0);
assertEquals(result9, Integer.valueOf(2));
final Integer result10 = uWithMoreElement.singleOrNull(item -> item % 2 == 1);
assertNull(result10);
}
#location 8
#vulnerability type NULL_DEREFERENCE
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
public static <E, F extends Number> Double average(final Iterable<E> iterable, final Function<E, F> func) {
F sum = sum(iterable, func);
if (sum == null) {
return null;
}
return sum.doubleValue() / size(iterable);
}
|
#vulnerable code
public static <E, F extends Number> Double average(final Iterable<E> iterable, final Function<E, F> func) {
F sum = sum(iterable, func);
return sum.doubleValue() / size(iterable);
}
#location 3
#vulnerability type NULL_DEREFERENCE
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
@Test
public void firstOrNull() {
final Integer result = $.firstOrNull(asList(5, 4, 3, 2, 1));
assertEquals("5", result.toString());
final Integer resultObj = new $<Integer>(asList(5, 4, 3, 2, 1)).firstOrNull();
assertEquals("5", resultObj.toString());
final Integer resultChain = $.chain(asList(5, 4, 3, 2, 1)).firstOrNull().item();
assertEquals("5", resultChain.toString());
assertNull($.firstOrNull(Collections.emptyList()));
assertNull(new $<Integer>(Collections.<Integer>emptyList()).firstOrNull());
final int resultPred = $.firstOrNull(asList(5, 4, 3, 2, 1), new Predicate<Integer>() {
public Boolean apply(Integer item) {
return item % 2 == 0;
}
});
assertEquals(4, resultPred);
final int resultPredChain = $.chain(asList(5, 4, 3, 2, 1)).firstOrNull(new Predicate<Integer>() {
public Boolean apply(Integer item) {
return item % 2 == 0;
}
}).item();
assertEquals(4, resultPredChain);
assertNull($.firstOrNull(Collections.<Integer>emptyList(), new Predicate<Integer>() {
public Boolean apply(Integer item) {
return item % 2 == 0;
}
}));
final int resultPredObj = new $<Integer>(asList(5, 4, 3, 2, 1)).firstOrNull(new Predicate<Integer>() {
public Boolean apply(Integer item) {
return item % 2 == 0;
}
});
assertEquals(4, resultPredObj);
assertNull(new $<Integer>(Collections.<Integer>emptyList()).firstOrNull(new Predicate<Integer>() {
public Boolean apply(Integer item) {
return item % 2 == 0;
}
}));
}
|
#vulnerable code
@Test
public void firstOrNull() {
final Integer result = $.firstOrNull(asList(5, 4, 3, 2, 1));
assertEquals("5", result.toString());
final Integer resultObj = new $<Integer>(asList(5, 4, 3, 2, 1)).firstOrNull();
assertEquals("5", resultObj.toString());
assertNull($.firstOrNull(Collections.emptyList()));
assertNull(new $<Integer>(Collections.<Integer>emptyList()).firstOrNull());
final int resultPred = $.firstOrNull(asList(5, 4, 3, 2, 1), new Predicate<Integer>() {
public Boolean apply(Integer item) {
return item % 2 == 0;
}
});
assertEquals(4, resultPred);
assertNull($.firstOrNull(Collections.<Integer>emptyList(), new Predicate<Integer>() {
public Boolean apply(Integer item) {
return item % 2 == 0;
}
}));
final int resultPredObj = new $<Integer>(asList(5, 4, 3, 2, 1)).firstOrNull(new Predicate<Integer>() {
public Boolean apply(Integer item) {
return item % 2 == 0;
}
});
assertEquals(4, resultPredObj);
assertNull(new $<Integer>(Collections.<Integer>emptyList()).firstOrNull(new Predicate<Integer>() {
public Boolean apply(Integer item) {
return item % 2 == 0;
}
}));
}
#location 20
#vulnerability type NULL_DEREFERENCE
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
public static <T extends Number> double mean(final Iterable<T> iterable) {
T result = null;
int count = 0;
for (final T item : iterable) {
result = add(result, item);
count += 1;
}
if (result == null) {
return 0d;
}
return result.doubleValue() / count;
}
|
#vulnerable code
public static <T extends Number> double mean(final Iterable<T> iterable) {
T result = null;
int count = 0;
for (final T item : iterable) {
result = add(result, item);
count += 1;
}
return result.doubleValue() / count;
}
#location 8
#vulnerability type NULL_DEREFERENCE
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
@Test
public void detect() {
final Optional<Integer> result = _.detect(asList(1, 2, 3, 4, 5, 6),
new Predicate<Integer>() {
public Boolean apply(Integer item) {
return item % 2 == 0;
}
});
assertEquals("Optional.of(2)", result.toString());
}
|
#vulnerable code
@Test
public void detect() {
final Integer result = _.detect(asList(1, 2, 3, 4, 5, 6),
new Predicate<Integer>() {
public Boolean apply(Integer item) {
return item % 2 == 0;
}
});
assertEquals("2", result.toString());
}
#location 9
#vulnerability type NULL_DEREFERENCE
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
@Test
public void testCreateEdgeBetweenVerticesPropertiesEagerlyLoadedOnHasHas() {
Vertex person1 = this.sqlgGraph.addVertex(T.label, "Person", "name", "john");
Vertex person2 = this.sqlgGraph.addVertex(T.label, "Person", "name", "peter");
this.sqlgGraph.tx().commit();
person1 = this.sqlgGraph.traversal().V(person1.id()).next();
person2 = this.sqlgGraph.traversal().V(person2.id()).next();
person1.addEdge("friend", person2);
Assert.assertEquals("john", person1.value("name"));
Assert.assertEquals("peter", person2.value("name"));
this.sqlgGraph.tx().commit();
List<Vertex> vertices = this.sqlgGraph.traversal().V().<Vertex>has(T.label, "Person").<Vertex>has("name", "john").toList();
Assert.assertEquals(1, vertexTraversal(vertices.get(0)).out("friend").count().next().intValue());
Assert.assertEquals(1, vertices.size());
vertices = this.sqlgGraph.traversal().V().<Vertex>has(T.label, "Person").<Vertex>has("name", "peter").toList();
Assert.assertEquals(1, vertexTraversal(vertices.get(0)).in("friend").count().next().intValue());
Assert.assertEquals(1, vertices.size());
}
|
#vulnerable code
@Test
public void testCreateEdgeBetweenVerticesPropertiesEagerlyLoadedOnHasHas() {
Vertex person1 = this.sqlgGraph.addVertex(T.label, "Person", "name", "john");
Vertex person2 = this.sqlgGraph.addVertex(T.label, "Person", "name", "peter");
this.sqlgGraph.tx().commit();
person1 = this.sqlgGraph.v(person1.id());
person2 = this.sqlgGraph.v(person2.id());
person1.addEdge("friend", person2);
Assert.assertEquals("john", person1.value("name"));
Assert.assertEquals("peter", person2.value("name"));
this.sqlgGraph.tx().commit();
List<Vertex> vertices = this.sqlgGraph.traversal().V().<Vertex>has(T.label, "Person").<Vertex>has("name", "john").toList();
Assert.assertEquals(1, vertexTraversal(vertices.get(0)).out("friend").count().next().intValue());
Assert.assertEquals(1, vertices.size());
vertices = this.sqlgGraph.traversal().V().<Vertex>has(T.label, "Person").<Vertex>has("name", "peter").toList();
Assert.assertEquals(1, vertexTraversal(vertices.get(0)).in("friend").count().next().intValue());
Assert.assertEquals(1, vertices.size());
}
#location 8
#vulnerability type NULL_DEREFERENCE
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
@Test
public void testBatchUpdatePersistentVertices() {
Vertex v1 = this.sqlgGraph.addVertex(T.label, "Person", "name", "a");
Vertex v2 = this.sqlgGraph.addVertex(T.label, "Person", "surname", "b");
this.sqlgGraph.tx().commit();
assertEquals("a", this.sqlgGraph.traversal().V(v1.id()).next().value("name"));
assertEquals("b", this.sqlgGraph.traversal().V(v2.id()).next().value("surname"));
this.sqlgGraph.tx().rollback();
this.sqlgGraph.tx().normalBatchModeOn();
v1.property("name", "aa");
v2.property("surname", "bb");
this.sqlgGraph.tx().commit();
assertEquals("aa", this.sqlgGraph.traversal().V(v1.id()).next().value("name"));
assertEquals("bb", this.sqlgGraph.traversal().V(v2.id()).next().value("surname"));
}
|
#vulnerable code
@Test
public void testBatchUpdatePersistentVertices() {
Vertex v1 = this.sqlgGraph.addVertex(T.label, "Person", "name", "a");
Vertex v2 = this.sqlgGraph.addVertex(T.label, "Person", "surname", "b");
this.sqlgGraph.tx().commit();
assertEquals("a", this.sqlgGraph.v(v1.id()).value("name"));
assertEquals("b", this.sqlgGraph.v(v2.id()).value("surname"));
this.sqlgGraph.tx().rollback();
this.sqlgGraph.tx().normalBatchModeOn();
v1.property("name", "aa");
v2.property("surname", "bb");
this.sqlgGraph.tx().commit();
assertEquals("aa", this.sqlgGraph.v(v1.id()).value("name"));
assertEquals("bb", this.sqlgGraph.v(v2.id()).value("surname"));
}
#location 6
#vulnerability type NULL_DEREFERENCE
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
@Test
public void testBatchUpdatePersistentVerticesAllTypes() {
Assume.assumeTrue(this.sqlgGraph.features().vertex().properties().supportsFloatValues());
Vertex v1 = this.sqlgGraph.addVertex(T.label, "Person", "name", "a");
Vertex v2 = this.sqlgGraph.addVertex(T.label, "Person", "surname", "b");
this.sqlgGraph.tx().commit();
assertEquals("a", this.sqlgGraph.traversal().V(v1.id()).next().value("name"));
assertEquals("b", this.sqlgGraph.traversal().V(v2.id()).next().value("surname"));
this.sqlgGraph.tx().rollback();
this.sqlgGraph.tx().normalBatchModeOn();
v1.property("name", "aa");
v1.property("boolean", true);
v1.property("short", (short) 1);
v1.property("integer", 1);
v1.property("long", 1L);
v1.property("float", 1F);
v1.property("double", 1D);
v2.property("surname", "bb");
v2.property("boolean", false);
v2.property("short", (short) 2);
v2.property("integer", 2);
v2.property("long", 2L);
v2.property("float", 2F);
v2.property("double", 2D);
this.sqlgGraph.tx().commit();
assertEquals("aa", this.sqlgGraph.traversal().V(v1.id()).next().value("name"));
assertEquals(true, this.sqlgGraph.traversal().V(v1.id()).next().value("boolean"));
assertEquals((short) 1, this.sqlgGraph.traversal().V(v1.id()).next().<Short>value("short").shortValue());
assertEquals(1, this.sqlgGraph.traversal().V(v1.id()).next().<Integer>value("integer").intValue());
assertEquals(1L, this.sqlgGraph.traversal().V(v1.id()).next().<Long>value("long").longValue(), 0);
assertEquals(1F, this.sqlgGraph.traversal().V(v1.id()).next().<Float>value("float").floatValue(), 0);
assertEquals(1D, this.sqlgGraph.traversal().V(v1.id()).next().<Double>value("double").doubleValue(), 0);
assertEquals("bb", this.sqlgGraph.traversal().V(v2.id()).next().value("surname"));
assertEquals(false, this.sqlgGraph.traversal().V(v2.id()).next().value("boolean"));
assertEquals((short) 2, this.sqlgGraph.traversal().V(v2.id()).next().<Short>value("short").shortValue());
assertEquals(2, this.sqlgGraph.traversal().V(v2.id()).next().<Integer>value("integer").intValue());
assertEquals(2L, this.sqlgGraph.traversal().V(v2.id()).next().<Long>value("long").longValue(), 0);
assertEquals(2F, this.sqlgGraph.traversal().V(v2.id()).next().<Float>value("float").floatValue(), 0);
assertEquals(2D, this.sqlgGraph.traversal().V(v2.id()).next().<Double>value("double").doubleValue(), 0);
}
|
#vulnerable code
@Test
public void testBatchUpdatePersistentVerticesAllTypes() {
Assume.assumeTrue(this.sqlgGraph.features().vertex().properties().supportsFloatValues());
Vertex v1 = this.sqlgGraph.addVertex(T.label, "Person", "name", "a");
Vertex v2 = this.sqlgGraph.addVertex(T.label, "Person", "surname", "b");
this.sqlgGraph.tx().commit();
assertEquals("a", this.sqlgGraph.v(v1.id()).value("name"));
assertEquals("b", this.sqlgGraph.v(v2.id()).value("surname"));
this.sqlgGraph.tx().rollback();
this.sqlgGraph.tx().normalBatchModeOn();
v1.property("name", "aa");
v1.property("boolean", true);
v1.property("short", (short) 1);
v1.property("integer", 1);
v1.property("long", 1L);
v1.property("float", 1F);
v1.property("double", 1D);
v2.property("surname", "bb");
v2.property("boolean", false);
v2.property("short", (short) 2);
v2.property("integer", 2);
v2.property("long", 2L);
v2.property("float", 2F);
v2.property("double", 2D);
this.sqlgGraph.tx().commit();
assertEquals("aa", this.sqlgGraph.v(v1.id()).value("name"));
assertEquals(true, this.sqlgGraph.v(v1.id()).value("boolean"));
assertEquals((short) 1, this.sqlgGraph.v(v1.id()).<Short>value("short").shortValue());
assertEquals(1, this.sqlgGraph.v(v1.id()).<Integer>value("integer").intValue());
assertEquals(1L, this.sqlgGraph.v(v1.id()).<Long>value("long").longValue(), 0);
assertEquals(1F, this.sqlgGraph.v(v1.id()).<Float>value("float").floatValue(), 0);
assertEquals(1D, this.sqlgGraph.v(v1.id()).<Double>value("double").doubleValue(), 0);
assertEquals("bb", this.sqlgGraph.v(v2.id()).value("surname"));
assertEquals(false, this.sqlgGraph.v(v2.id()).value("boolean"));
assertEquals((short) 2, this.sqlgGraph.v(v2.id()).<Short>value("short").shortValue());
assertEquals(2, this.sqlgGraph.v(v2.id()).<Integer>value("integer").intValue());
assertEquals(2L, this.sqlgGraph.v(v2.id()).<Long>value("long").longValue(), 0);
assertEquals(2F, this.sqlgGraph.v(v2.id()).<Float>value("float").floatValue(), 0);
assertEquals(2D, this.sqlgGraph.v(v2.id()).<Double>value("double").doubleValue(), 0);
}
#location 9
#vulnerability type NULL_DEREFERENCE
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
@Test
public void testLoadVertexProperties() {
Vertex marko = this.sqlgGraph.addVertex(T.label, "Person", "name", "marko");
this.sqlgGraph.tx().commit();
marko = this.sqlgGraph.traversal().V(marko.id()).next();
Assert.assertEquals("marko", marko.property("name").value());
}
|
#vulnerable code
@Test
public void testLoadVertexProperties() {
Vertex marko = this.sqlgGraph.addVertex(T.label, "Person", "name", "marko");
this.sqlgGraph.tx().commit();
marko = this.sqlgGraph.v(marko.id());
Assert.assertEquals("marko", marko.property("name").value());
}
#location 6
#vulnerability type NULL_DEREFERENCE
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
@Test
public void testCreateEdgeBetweenVerticesPropertiesEagerlyLoadedOnHas() {
Vertex person1 = this.sqlgGraph.addVertex(T.label, "Person", "name", "john");
Vertex person2 = this.sqlgGraph.addVertex(T.label, "Person", "name", "peter");
this.sqlgGraph.tx().commit();
person1 = this.sqlgGraph.traversal().V(person1.id()).next();
person2 = this.sqlgGraph.traversal().V(person2.id()).next();
person1.addEdge("friend", person2);
Assert.assertEquals("john", person1.value("name"));
Assert.assertEquals("peter", person2.value("name"));
this.sqlgGraph.tx().commit();
List<Vertex> vertices = this.sqlgGraph.traversal().V().<Vertex>has(T.label, "Person").toList();
Assert.assertEquals(1, vertexTraversal(vertices.get(0)).out("friend").count().next().intValue());
Assert.assertEquals(1, vertexTraversal(vertices.get(1)).in("friend").count().next().intValue());
Assert.assertEquals(2, vertices.size());
}
|
#vulnerable code
@Test
public void testCreateEdgeBetweenVerticesPropertiesEagerlyLoadedOnHas() {
Vertex person1 = this.sqlgGraph.addVertex(T.label, "Person", "name", "john");
Vertex person2 = this.sqlgGraph.addVertex(T.label, "Person", "name", "peter");
this.sqlgGraph.tx().commit();
person1 = this.sqlgGraph.v(person1.id());
person2 = this.sqlgGraph.v(person2.id());
person1.addEdge("friend", person2);
Assert.assertEquals("john", person1.value("name"));
Assert.assertEquals("peter", person2.value("name"));
this.sqlgGraph.tx().commit();
List<Vertex> vertices = this.sqlgGraph.traversal().V().<Vertex>has(T.label, "Person").toList();
Assert.assertEquals(1, vertexTraversal(vertices.get(0)).out("friend").count().next().intValue());
Assert.assertEquals(1, vertexTraversal(vertices.get(1)).in("friend").count().next().intValue());
Assert.assertEquals(2, vertices.size());
}
#location 10
#vulnerability type NULL_DEREFERENCE
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
@Test
public void testBatchUpdatePersistentVertices() {
Vertex v1 = this.sqlgGraph.addVertex(T.label, "Person", "name", "a");
Vertex v2 = this.sqlgGraph.addVertex(T.label, "Person", "surname", "b");
this.sqlgGraph.tx().commit();
assertEquals("a", this.sqlgGraph.traversal().V(v1.id()).next().value("name"));
assertEquals("b", this.sqlgGraph.traversal().V(v2.id()).next().value("surname"));
this.sqlgGraph.tx().rollback();
this.sqlgGraph.tx().normalBatchModeOn();
v1.property("name", "aa");
v2.property("surname", "bb");
this.sqlgGraph.tx().commit();
assertEquals("aa", this.sqlgGraph.traversal().V(v1.id()).next().value("name"));
assertEquals("bb", this.sqlgGraph.traversal().V(v2.id()).next().value("surname"));
}
|
#vulnerable code
@Test
public void testBatchUpdatePersistentVertices() {
Vertex v1 = this.sqlgGraph.addVertex(T.label, "Person", "name", "a");
Vertex v2 = this.sqlgGraph.addVertex(T.label, "Person", "surname", "b");
this.sqlgGraph.tx().commit();
assertEquals("a", this.sqlgGraph.v(v1.id()).value("name"));
assertEquals("b", this.sqlgGraph.v(v2.id()).value("surname"));
this.sqlgGraph.tx().rollback();
this.sqlgGraph.tx().normalBatchModeOn();
v1.property("name", "aa");
v2.property("surname", "bb");
this.sqlgGraph.tx().commit();
assertEquals("aa", this.sqlgGraph.v(v1.id()).value("name"));
assertEquals("bb", this.sqlgGraph.v(v2.id()).value("surname"));
}
#location 7
#vulnerability type NULL_DEREFERENCE
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
@Test
public void testCreateEdgeBetweenVerticesPropertiesEagerlyLoadedOnHas() {
Vertex person1 = this.sqlgGraph.addVertex(T.label, "Person", "name", "john");
Vertex person2 = this.sqlgGraph.addVertex(T.label, "Person", "name", "peter");
this.sqlgGraph.tx().commit();
person1 = this.sqlgGraph.traversal().V(person1.id()).next();
person2 = this.sqlgGraph.traversal().V(person2.id()).next();
person1.addEdge("friend", person2);
Assert.assertEquals("john", person1.value("name"));
Assert.assertEquals("peter", person2.value("name"));
this.sqlgGraph.tx().commit();
List<Vertex> vertices = this.sqlgGraph.traversal().V().<Vertex>has(T.label, "Person").toList();
Assert.assertEquals(1, vertexTraversal(vertices.get(0)).out("friend").count().next().intValue());
Assert.assertEquals(1, vertexTraversal(vertices.get(1)).in("friend").count().next().intValue());
Assert.assertEquals(2, vertices.size());
}
|
#vulnerable code
@Test
public void testCreateEdgeBetweenVerticesPropertiesEagerlyLoadedOnHas() {
Vertex person1 = this.sqlgGraph.addVertex(T.label, "Person", "name", "john");
Vertex person2 = this.sqlgGraph.addVertex(T.label, "Person", "name", "peter");
this.sqlgGraph.tx().commit();
person1 = this.sqlgGraph.v(person1.id());
person2 = this.sqlgGraph.v(person2.id());
person1.addEdge("friend", person2);
Assert.assertEquals("john", person1.value("name"));
Assert.assertEquals("peter", person2.value("name"));
this.sqlgGraph.tx().commit();
List<Vertex> vertices = this.sqlgGraph.traversal().V().<Vertex>has(T.label, "Person").toList();
Assert.assertEquals(1, vertexTraversal(vertices.get(0)).out("friend").count().next().intValue());
Assert.assertEquals(1, vertexTraversal(vertices.get(1)).in("friend").count().next().intValue());
Assert.assertEquals(2, vertices.size());
}
#location 8
#vulnerability type NULL_DEREFERENCE
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
@Test
public void testMultipleReferencesToSameVertex2Instances() {
Vertex v1 = this.sqlgGraph.addVertex(T.label, "Person", "name", "john");
this.sqlgGraph.tx().commit();
//_v1 is in the transaction cache
//v1 is not
Vertex _v1 = this.sqlgGraph.traversal().V(v1.id()).next();
Assert.assertEquals("john", v1.value("name"));
Assert.assertEquals("john", _v1.value("name"));
v1.property("name", "john1");
Assert.assertEquals("john1", v1.value("name"));
Assert.assertEquals("john1", _v1.value("name"));
}
|
#vulnerable code
@Test
public void testMultipleReferencesToSameVertex2Instances() {
Vertex v1 = this.sqlgGraph.addVertex(T.label, "Person", "name", "john");
this.sqlgGraph.tx().commit();
//_v1 is in the transaction cache
//v1 is not
Vertex _v1 = this.sqlgGraph.v(v1.id());
Assert.assertEquals("john", v1.value("name"));
Assert.assertEquals("john", _v1.value("name"));
v1.property("name", "john1");
Assert.assertEquals("john1", v1.value("name"));
Assert.assertEquals("john1", _v1.value("name"));
}
#location 9
#vulnerability type NULL_DEREFERENCE
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
@Test
public void testCreateEdgeBetweenVerticesPropertiesEagerlyLoadedOnHasHas() {
Vertex person1 = this.sqlgGraph.addVertex(T.label, "Person", "name", "john");
Vertex person2 = this.sqlgGraph.addVertex(T.label, "Person", "name", "peter");
this.sqlgGraph.tx().commit();
person1 = this.sqlgGraph.traversal().V(person1.id()).next();
person2 = this.sqlgGraph.traversal().V(person2.id()).next();
person1.addEdge("friend", person2);
Assert.assertEquals("john", person1.value("name"));
Assert.assertEquals("peter", person2.value("name"));
this.sqlgGraph.tx().commit();
List<Vertex> vertices = this.sqlgGraph.traversal().V().<Vertex>has(T.label, "Person").<Vertex>has("name", "john").toList();
Assert.assertEquals(1, vertexTraversal(vertices.get(0)).out("friend").count().next().intValue());
Assert.assertEquals(1, vertices.size());
vertices = this.sqlgGraph.traversal().V().<Vertex>has(T.label, "Person").<Vertex>has("name", "peter").toList();
Assert.assertEquals(1, vertexTraversal(vertices.get(0)).in("friend").count().next().intValue());
Assert.assertEquals(1, vertices.size());
}
|
#vulnerable code
@Test
public void testCreateEdgeBetweenVerticesPropertiesEagerlyLoadedOnHasHas() {
Vertex person1 = this.sqlgGraph.addVertex(T.label, "Person", "name", "john");
Vertex person2 = this.sqlgGraph.addVertex(T.label, "Person", "name", "peter");
this.sqlgGraph.tx().commit();
person1 = this.sqlgGraph.v(person1.id());
person2 = this.sqlgGraph.v(person2.id());
person1.addEdge("friend", person2);
Assert.assertEquals("john", person1.value("name"));
Assert.assertEquals("peter", person2.value("name"));
this.sqlgGraph.tx().commit();
List<Vertex> vertices = this.sqlgGraph.traversal().V().<Vertex>has(T.label, "Person").<Vertex>has("name", "john").toList();
Assert.assertEquals(1, vertexTraversal(vertices.get(0)).out("friend").count().next().intValue());
Assert.assertEquals(1, vertices.size());
vertices = this.sqlgGraph.traversal().V().<Vertex>has(T.label, "Person").<Vertex>has("name", "peter").toList();
Assert.assertEquals(1, vertexTraversal(vertices.get(0)).in("friend").count().next().intValue());
Assert.assertEquals(1, vertices.size());
}
#location 10
#vulnerability type NULL_DEREFERENCE
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
@Test
public void testLoadingDatasourceFromJndi() throws Exception {
SqlgGraph g = SqlgGraph.open(configuration);
assertNotNull(g.getSqlDialect());
assertEquals(configuration.getString("jdbc.url"), g.getJdbcUrl());
assertNotNull(g.getConnection());
}
|
#vulnerable code
@Test
public void testLoadingDatasourceFromJndi() throws Exception {
SqlgGraph g = SqlgGraph.open(configuration);
assertNotNull(g.getSqlDialect());
assertNotNull(g.getSqlgDataSource().get(configuration.getString("jdbc.url")));
}
#location 5
#vulnerability type NULL_DEREFERENCE
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
@Test
public void testLoadingLocalDate() throws Exception {
Vertex v = this.sqlgGraph.addVertex(T.label, "Person", "createOn", LocalDate.now());
this.sqlgGraph.tx().commit();
this.sqlgGraph.close();
//noinspection Duplicates
try (SqlgGraph sqlgGraph1 = SqlgGraph.open(configuration)) {
Vertex vv = sqlgGraph1.traversal().V(v.id()).next();
Assert.assertTrue(vv.property("createOn").isPresent());
Map<String, PropertyType> propertyTypeMap = sqlgGraph1.getTopology().getAllTables().get(SchemaTable.of(
sqlgGraph1.getSqlDialect().getPublicSchema(), "V_Person").toString());
Assert.assertTrue(propertyTypeMap.containsKey("createOn"));
sqlgGraph1.tx().rollback();
}
}
|
#vulnerable code
@Test
public void testLoadingLocalDate() throws Exception {
Vertex v = this.sqlgGraph.addVertex(T.label, "Person", "createOn", LocalDate.now());
this.sqlgGraph.tx().commit();
this.sqlgGraph.close();
//noinspection Duplicates
try (SqlgGraph sqlgGraph1 = SqlgGraph.open(configuration)) {
Vertex vv = sqlgGraph1.traversal().V(v.id()).next();
assertTrue(vv.property("createOn").isPresent());
Map<String, PropertyType> propertyTypeMap = sqlgGraph1.getTopology().getAllTables().get(SchemaTable.of(
sqlgGraph1.getSqlDialect().getPublicSchema(), "V_Person").toString());
assertTrue(propertyTypeMap.containsKey("createOn"));
sqlgGraph1.tx().rollback();
}
}
#location 10
#vulnerability type NULL_DEREFERENCE
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
@Test
public void testLoadPropertiesOnUpdate() {
Vertex vertex = this.sqlgGraph.addVertex(T.label, "Person", "property1", "a", "property2", "b");
this.sqlgGraph.tx().commit();
vertex = this.sqlgGraph.traversal().V(vertex.id()).next();
vertex.property("property1", "aa");
assertEquals("b", vertex.value("property2"));
}
|
#vulnerable code
@Test
public void testLoadPropertiesOnUpdate() {
Vertex vertex = this.sqlgGraph.addVertex(T.label, "Person", "property1", "a", "property2", "b");
this.sqlgGraph.tx().commit();
vertex = this.sqlgGraph.v(vertex.id());
vertex.property("property1", "aa");
assertEquals("b", vertex.value("property2"));
}
#location 7
#vulnerability type NULL_DEREFERENCE
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
@Override
public Map<String, Set<IndexRef>> extractIndices(Connection conn, String catalog, String schema) throws SQLException{
// copied and simplified from the postgres JDBC driver class (PgDatabaseMetaData)
String sql = "SELECT NULL AS TABLE_CAT, n.nspname AS TABLE_SCHEM, "
+ " ct.relname AS TABLE_NAME, NOT i.indisunique AS NON_UNIQUE, "
+ " NULL AS INDEX_QUALIFIER, ci.relname AS INDEX_NAME, "
+ " CASE i.indisclustered "
+ " WHEN true THEN " + java.sql.DatabaseMetaData.tableIndexClustered
+ " ELSE CASE am.amname "
+ " WHEN 'hash' THEN " + java.sql.DatabaseMetaData.tableIndexHashed
+ " ELSE " + java.sql.DatabaseMetaData.tableIndexOther
+ " END "
+ " END AS TYPE, "
+ " (i.keys).n AS ORDINAL_POSITION, "
+ " trim(both '\"' from pg_catalog.pg_get_indexdef(ci.oid, (i.keys).n, false)) AS COLUMN_NAME "
+ "FROM pg_catalog.pg_class ct "
+ " JOIN pg_catalog.pg_namespace n ON (ct.relnamespace = n.oid) "
+ " JOIN (SELECT i.indexrelid, i.indrelid, i.indoption, "
+ " i.indisunique, i.indisclustered, i.indpred, "
+ " i.indexprs, "
+ " information_schema._pg_expandarray(i.indkey) AS keys "
+ " FROM pg_catalog.pg_index i) i "
+ " ON (ct.oid = i.indrelid) "
+ " JOIN pg_catalog.pg_class ci ON (ci.oid = i.indexrelid) "
+ " JOIN pg_catalog.pg_am am ON (ci.relam = am.oid) "
+ "WHERE true ";
if (schema != null && !"".equals(schema)) {
sql += " AND n.nspname = " + maybeWrapInQoutes(schema);
} else {
// exclude schemas we know we're not interested in
sql += " AND n.nspname <> 'pg_catalog' AND n.nspname <> 'pg_toast' AND n.nspname <> '"+SQLG_SCHEMA+"'";
}
sql += " ORDER BY NON_UNIQUE, TYPE, INDEX_NAME, ORDINAL_POSITION ";
try (Statement s=conn.createStatement()){
try (ResultSet indexRs=s.executeQuery(sql)){
Map<String, Set<IndexRef>> ret=new HashMap<>();
String lastKey=null;
String lastIndexName=null;
IndexType lastIndexType=null;
List<String> lastColumns=new LinkedList<>();
while (indexRs.next()){
String cat=indexRs.getString("TABLE_CAT");
String sch=indexRs.getString("TABLE_SCHEM");
String tbl=indexRs.getString("TABLE_NAME");
String key=cat+"."+sch+"."+tbl;
String indexName=indexRs.getString("INDEX_NAME");
boolean nonUnique=indexRs.getBoolean("NON_UNIQUE");
if (lastIndexName==null){
lastIndexName=indexName;
lastIndexType=nonUnique?IndexType.NON_UNIQUE:IndexType.UNIQUE;
lastKey=key;
} else if (!lastIndexName.equals(indexName)){
if (!lastIndexName.endsWith("_pkey") && !lastIndexName.endsWith("_idx")){
if (!Schema.GLOBAL_UNIQUE_INDEX_SCHEMA.equals(schema)){
//System.out.println(lastColumns);
//TopologyManager.addGlobalUniqueIndex(sqlgGraph,lastIndexName,lastColumns);
//} else {
MultiMap.put(ret, lastKey, new IndexRef(lastIndexName,lastIndexType,lastColumns));
}
}
lastColumns.clear();
lastIndexName=indexName;
lastIndexType=nonUnique?IndexType.NON_UNIQUE:IndexType.UNIQUE;
}
lastColumns.add(indexRs.getString("COLUMN_NAME"));
lastKey=key;
}
if (lastIndexName!=null && !lastIndexName.endsWith("_pkey") && !lastIndexName.endsWith("_idx")){
if (!Schema.GLOBAL_UNIQUE_INDEX_SCHEMA.equals(schema)){
//System.out.println(lastColumns);
//TopologyManager.addGlobalUniqueIndex(sqlgGraph,lastIndexName,lastColumns);
//} else {
MultiMap.put(ret, lastKey, new IndexRef(lastIndexName,lastIndexType,lastColumns));
}
}
return ret;
}
}
}
|
#vulnerable code
@Override
public Map<String, Set<IndexRef>> extractIndices(Connection conn, String catalog, String schema) throws SQLException{
// copied and simplified from the postgres JDBC driver class (PgDatabaseMetaData)
String sql = "SELECT NULL AS TABLE_CAT, n.nspname AS TABLE_SCHEM, "
+ " ct.relname AS TABLE_NAME, NOT i.indisunique AS NON_UNIQUE, "
+ " NULL AS INDEX_QUALIFIER, ci.relname AS INDEX_NAME, "
+ " CASE i.indisclustered "
+ " WHEN true THEN " + java.sql.DatabaseMetaData.tableIndexClustered
+ " ELSE CASE am.amname "
+ " WHEN 'hash' THEN " + java.sql.DatabaseMetaData.tableIndexHashed
+ " ELSE " + java.sql.DatabaseMetaData.tableIndexOther
+ " END "
+ " END AS TYPE, "
+ " (i.keys).n AS ORDINAL_POSITION, "
+ " trim(both '\"' from pg_catalog.pg_get_indexdef(ci.oid, (i.keys).n, false)) AS COLUMN_NAME "
+ "FROM pg_catalog.pg_class ct "
+ " JOIN pg_catalog.pg_namespace n ON (ct.relnamespace = n.oid) "
+ " JOIN (SELECT i.indexrelid, i.indrelid, i.indoption, "
+ " i.indisunique, i.indisclustered, i.indpred, "
+ " i.indexprs, "
+ " information_schema._pg_expandarray(i.indkey) AS keys "
+ " FROM pg_catalog.pg_index i) i "
+ " ON (ct.oid = i.indrelid) "
+ " JOIN pg_catalog.pg_class ci ON (ci.oid = i.indexrelid) "
+ " JOIN pg_catalog.pg_am am ON (ci.relam = am.oid) "
+ "WHERE true ";
if (schema != null && !"".equals(schema)) {
sql += " AND n.nspname = " + maybeWrapInQoutes(schema);
}
sql += " ORDER BY NON_UNIQUE, TYPE, INDEX_NAME, ORDINAL_POSITION ";
try (Statement s=conn.createStatement()){
try (ResultSet indexRs=s.executeQuery(sql)){
Map<String, Set<IndexRef>> ret=new HashMap<>();
String lastKey=null;
String lastIndexName=null;
IndexType lastIndexType=null;
List<String> lastColumns=new LinkedList<>();
while (indexRs.next()){
String cat=indexRs.getString("TABLE_CAT");
String sch=indexRs.getString("TABLE_SCHEM");
String tbl=indexRs.getString("TABLE_NAME");
String key=cat+"."+sch+"."+tbl;
String indexName=indexRs.getString("INDEX_NAME");
boolean nonUnique=indexRs.getBoolean("NON_UNIQUE");
if (lastIndexName==null){
lastIndexName=indexName;
lastIndexType=nonUnique?IndexType.NON_UNIQUE:IndexType.UNIQUE;
lastKey=key;
} else if (!lastIndexName.equals(indexName)){
if (!lastIndexName.endsWith("_pkey") && !lastIndexName.endsWith("_idx")){
if (!Schema.GLOBAL_UNIQUE_INDEX_SCHEMA.equals(schema)){
//System.out.println(lastColumns);
//TopologyManager.addGlobalUniqueIndex(sqlgGraph,lastIndexName,lastColumns);
//} else {
MultiMap.put(ret, lastKey, new IndexRef(lastIndexName,lastIndexType,lastColumns));
}
}
lastColumns.clear();
lastIndexName=indexName;
lastIndexType=nonUnique?IndexType.NON_UNIQUE:IndexType.UNIQUE;
}
lastColumns.add(indexRs.getString("COLUMN_NAME"));
lastKey=key;
}
if (!lastIndexName.endsWith("_pkey") && !lastIndexName.endsWith("_idx")){
if (!Schema.GLOBAL_UNIQUE_INDEX_SCHEMA.equals(schema)){
//System.out.println(lastColumns);
//TopologyManager.addGlobalUniqueIndex(sqlgGraph,lastIndexName,lastColumns);
//} else {
MultiMap.put(ret, lastKey, new IndexRef(lastIndexName,lastIndexType,lastColumns));
}
}
return ret;
}
}
}
#location 53
#vulnerability type NULL_DEREFERENCE
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
@Test
public void testMultipleReferencesToSameVertex() {
Vertex v1 = this.sqlgGraph.addVertex(T.label, "Person", "name", "john");
this.sqlgGraph.tx().commit();
Assert.assertEquals("john", v1.value("name"));
//_v1 is in the transaction cache
//v1 is not
Vertex _v1 = this.sqlgGraph.traversal().V(v1.id()).next();
Assert.assertEquals("john", _v1.value("name"));
v1.property("name", "john1");
Assert.assertEquals("john1", v1.value("name"));
Assert.assertEquals("john1", _v1.value("name"));
}
|
#vulnerable code
@Test
public void testMultipleReferencesToSameVertex() {
Vertex v1 = this.sqlgGraph.addVertex(T.label, "Person", "name", "john");
this.sqlgGraph.tx().commit();
Assert.assertEquals("john", v1.value("name"));
//_v1 is in the transaction cache
//v1 is not
Vertex _v1 = this.sqlgGraph.v(v1.id());
Assert.assertEquals("john", _v1.value("name"));
v1.property("name", "john1");
Assert.assertEquals("john1", v1.value("name"));
Assert.assertEquals("john1", _v1.value("name"));
}
#location 9
#vulnerability type NULL_DEREFERENCE
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
@BeforeClass
public static void beforeClass() throws Exception {
URL sqlProperties = Thread.currentThread().getContextClassLoader().getResource("sqlg.properties");
configuration = new PropertiesConfiguration(sqlProperties);
if (!configuration.containsKey("jdbc.url")) {
throw new IllegalArgumentException(String.format("SqlGraph configuration requires that the %s be set", "jdbc.url"));
}
String url = configuration.getString("jdbc.url");
//obtain the connection that we will later supply from JNDI
ds = new C3p0DataSourceFactory().setup(url, configuration).getDatasource();
//change the connection url to be a JNDI one
configuration.setProperty("jdbc.url", "jndi:testConnection");
//set up the initial context
NamingManager.setInitialContextFactoryBuilder(environment -> {
InitialContextFactory mockFactory = mock(InitialContextFactory.class);
Context mockContext = mock(Context.class);
when(mockFactory.getInitialContext(any())).thenReturn(mockContext);
when(mockContext.lookup("testConnection")).thenReturn(ds);
return mockFactory;
});
}
|
#vulnerable code
@BeforeClass
public static void beforeClass() throws ClassNotFoundException, IOException, PropertyVetoException, NamingException, ConfigurationException {
URL sqlProperties = Thread.currentThread().getContextClassLoader().getResource("sqlg.properties");
configuration = new PropertiesConfiguration(sqlProperties);
if (!configuration.containsKey("jdbc.url")) {
throw new IllegalArgumentException(String.format("SqlGraph configuration requires that the %s be set", "jdbc.url"));
}
String url = configuration.getString("jdbc.url");
//obtain the connection that we will later supply from JNDI
SqlgGraph g = SqlgGraph.open(configuration);
ds = g.getSqlgDataSource().get(url);
// g.getTopology().close();
//change the connection url to be a JNDI one
configuration.setProperty("jdbc.url", "jndi:testConnection");
//set up the initial context
NamingManager.setInitialContextFactoryBuilder(environment -> {
InitialContextFactory mockFactory = mock(InitialContextFactory.class);
Context mockContext = mock(Context.class);
when(mockFactory.getInitialContext(any())).thenReturn(mockContext);
when(mockContext.lookup("testConnection")).thenReturn(ds);
return mockFactory;
});
}
#location 13
#vulnerability type RESOURCE_LEAK
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
@Test
public void testPropertiesNotBeingCachedOnVertexOut() {
Vertex v1 = this.sqlgGraph.addVertex(T.label, "Person");
Vertex v2 = this.sqlgGraph.addVertex(T.label, "Car", "name", "a");
Vertex v3 = this.sqlgGraph.addVertex(T.label, "Car", "name", "b");
Vertex v4 = this.sqlgGraph.addVertex(T.label, "Car", "name", "c");
v1.addEdge("car", v2);
v1.addEdge("car", v3);
v1.addEdge("car", v4);
this.sqlgGraph.tx().commit();
v1 = this.sqlgGraph.traversal().V(v1.id()).next();
List<Vertex> cars = vertexTraversal(v1).out("car").toList();
Assert.assertEquals(3, cars.size());
}
|
#vulnerable code
@Test
public void testPropertiesNotBeingCachedOnVertexOut() {
Vertex v1 = this.sqlgGraph.addVertex(T.label, "Person");
Vertex v2 = this.sqlgGraph.addVertex(T.label, "Car", "name", "a");
Vertex v3 = this.sqlgGraph.addVertex(T.label, "Car", "name", "b");
Vertex v4 = this.sqlgGraph.addVertex(T.label, "Car", "name", "c");
v1.addEdge("car", v2);
v1.addEdge("car", v3);
v1.addEdge("car", v4);
this.sqlgGraph.tx().commit();
v1 = this.sqlgGraph.v(v1.id());
List<Vertex> cars = vertexTraversal(v1).out("car").toList();
Assert.assertEquals(3, cars.size());
}
#location 16
#vulnerability type NULL_DEREFERENCE
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
@POST
@Path(value = "test")
@Produces(MediaType.APPLICATION_JSON)
public Map<String, Object> test(@Context Repository repository,
@FormParam(Notifier.JENKINS_BASE) String jenkinsBase,
@FormParam(Notifier.CLONE_TYPE) String cloneType,
@FormParam(Notifier.CLONE_URL) String cloneUrl,
@FormParam(Notifier.IGNORE_CERTS) boolean ignoreCerts,
@FormParam(Notifier.OMIT_HASH_CODE) boolean omitHashCode) {
if (jenkinsBase == null || cloneType == null || (cloneType.equals("custom") && cloneUrl == null)) {
Map<String, Object> map = new HashMap<String, Object>();
map.put("successful", false);
map.put("message", "Settings must be configured");
return map;
}
permissionService.validateForRepository(repository, Permission.REPO_ADMIN);
log.debug("Triggering jenkins notification for repository {}/{}",
repository.getProject().getKey(), repository.getSlug());
/* @todo [email protected]: Send null instead of master and sha1 and
* handle this in notify
*/
NotificationResult result = notifier.notify(repository, jenkinsBase,
ignoreCerts, cloneType, cloneUrl, null, null, omitHashCode, true);
log.debug("Got response from jenkins: {}", result);
// Shouldn't have to do this but the result isn't being marshalled correctly
Map<String, Object> map = new HashMap<String, Object>();
map.put("successful", result.isSuccessful());
map.put("url", result.getUrl());
map.put("message", result.getMessage());
return map;
}
|
#vulnerable code
@POST
@Path(value = "test")
@Produces(MediaType.APPLICATION_JSON)
public Map<String, Object> test(@Context Repository repository,
@FormParam(Notifier.JENKINS_BASE) String jenkinsBase,
@FormParam(Notifier.CLONE_TYPE) String cloneType,
@FormParam(Notifier.CLONE_URL) String cloneUrl,
@FormParam(Notifier.IGNORE_CERTS) boolean ignoreCerts,
@FormParam(Notifier.OMIT_HASH_CODE) boolean omitHashCode) {
if (jenkinsBase == null || cloneType == null || (cloneType.equals("custom") && cloneUrl == null)) {
Map<String, Object> map = new HashMap<String, Object>();
map.put("successful", false);
map.put("message", "Settings must be configured");
return map;
}
permissionService.validateForRepository(repository, Permission.REPO_ADMIN);
log.debug("Triggering jenkins notification for repository {}/{}",
repository.getProject().getKey(), repository.getSlug());
/* @todo [email protected]: Send null instead of master and sha1 and
* handle this in notify
*/
NotificationResult result = notifier.notify(repository, jenkinsBase,
ignoreCerts, cloneType, cloneUrl, null, null, omitHashCode);
log.debug("Got response from jenkins: {}", result);
// Shouldn't have to do this but the result isn't being marshalled correctly
Map<String, Object> map = new HashMap<String, Object>();
map.put("successful", result.isSuccessful());
map.put("url", result.getUrl());
map.put("message", result.getMessage());
return map;
}
#location 31
#vulnerability type NULL_DEREFERENCE
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
public static void main(String[] args) throws Exception {
try (MongoClient client = new MongoClient(Connection.URI)) {
MongoCollection<Document> eventCollection =
client.getDatabase("test").getCollection("events");
long i = 0;
while (true) {
Document doc = new Document();
doc.put("i", i++);
doc.put("even", i % 2);
eventCollection.insertOne(doc);
//System.out.println("inserted: " + doc);
Thread.sleep(2000L + (long)(1000*Math.random()));
}
}
}
|
#vulnerable code
public static void main(String[] args) throws Exception {
MongoCollection<Document> eventCollection =
new MongoClient(
new MongoClientURI("mongodb://localhost:27001,localhost:27002,localhost:27003/test?replicatSet=demo-dev")
).getDatabase("test").getCollection("events");
long i = 0;
while (true) {
Document doc = new Document();
doc.put("i", i++);
doc.put("even", i % 2);
eventCollection.insertOne(doc);
//System.out.println("inserted: " + doc);
Thread.sleep(2000L + (long)(1000*Math.random()));
}
}
#location 6
#vulnerability type RESOURCE_LEAK
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
public static void main(String[] args) throws Exception {
try (MongoClient client = new MongoClient(Connection.URI)) {
MongoCollection<Document> eventCollection =
client.getDatabase("test").getCollection("events");
ChangeStreamIterable<Document> changes = eventCollection.watch(asList(
Aggregates.match( and( asList(
in("operationType", asList("insert")),
eq("fullDocument.even", 1L)))
)));
changes.iterator().forEachRemaining(
change -> System.out.println("received: " + change.getFullDocument())
);
}
}
|
#vulnerable code
public static void main(String[] args) throws Exception {
MongoCollection<Document> eventCollection =
new MongoClient(
new MongoClientURI("mongodb://localhost:27001,localhost:27002,localhost:27003/test?replicatSet=demo-dev")
).getDatabase("test").getCollection("events");
ChangeStreamIterable<Document> changes = eventCollection.watch(asList(
Aggregates.match( and( asList(
in("operationType", asList("insert")),
eq("fullDocument.even", 1L)))
)));
changes.forEach(new Block<ChangeStreamDocument<Document>>() {
@Override
public void apply(ChangeStreamDocument<Document> t) {
System.out.println("received: " + t.getFullDocument());
}
});
}
#location 6
#vulnerability type RESOURCE_LEAK
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
@Override
public void start(int port, SSLContext context, boolean authReq) throws IOException {
ContainerSocketProcessor server = new ContainerSocketProcessor(this, count, select);
socketConnection = new SocketConnection(authReq ? new AuthRequiredServer(server) : server);
socketConnection.connect(new InetSocketAddress(port), context);
}
|
#vulnerable code
@Override
public void start(int port, SSLContext context, boolean authReq) throws IOException {
SocketConnection socketConnection = new SocketConnection(authReq ? new AuthRequiredServer(server) : server);
socketConnection.connect(new InetSocketAddress(port), context);
}
#location 4
#vulnerability type RESOURCE_LEAK
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
@Override
public FloatProcessor toFloat(int channelNumber, FloatProcessor fp)
{
int width = getWidth();
int height = getHeight();
long size = ImageUtils.getTotalSamples(this.imageData);
if (fp == null || fp.getWidth()!=width || fp.getHeight()!=height)
fp = new FloatProcessor(width, height, new float[(int)size], super.cm); // TODO : notice that we can't get more than 2 gig of floats
int[] origin = Index.create(0, 0, getPlanePosition());
int[] span = Span.singlePlane(width, height, this.imageData.getNumDimensions());
SetFloatValuesOperation<T> floatOp = new SetFloatValuesOperation<T>(this.imageData, origin, span, fp);
Operation.apply(floatOp);
fp.setRoi(getRoi());
fp.setMask(getMask());
fp.setMinAndMax(this.min, this.max);
fp.setThreshold(getMinThreshold(), getMaxThreshold(), ImageProcessor.NO_LUT_UPDATE);
return fp;
}
|
#vulnerable code
@Override
public FloatProcessor toFloat(int channelNumber, FloatProcessor fp)
{
int width = getWidth();
int height = getHeight();
long size = getNumPixels(this.imageData);
if (fp == null || fp.getWidth()!=width || fp.getHeight()!=height)
fp = new FloatProcessor(width, height, new float[(int)size], super.cm);
int[] origin = Index.create(0, 0, getPlanePosition());
int[] span = Span.singlePlane(width, height, this.imageData.getNumDimensions());
SetFloatValuesOperation<T> floatOp = new SetFloatValuesOperation<T>(this.imageData, origin, span, fp);
Operation.apply(floatOp);
fp.setRoi(getRoi());
fp.setMask(getMask());
fp.setMinAndMax(this.min, this.max);
fp.setThreshold(getMinThreshold(), getMaxThreshold(), ImageProcessor.NO_LUT_UPDATE);
return fp;
}
#location 12
#vulnerability type NULL_DEREFERENCE
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
public AttributeBuilder nodeAttributes(String groupId, String artifactId, String version, String scopes, String effectiveScope) {
AbstractNode node = this.scopeStyles.containsKey(effectiveScope) ? this.scopeStyles.get(effectiveScope) : this.defaultNode;
return node.createAttributes(groupId, artifactId, version, scopes, node != this.defaultNode);
}
|
#vulnerable code
public AttributeBuilder nodeAttributes(String groupId, String artifactId, String version, String scopes, String effectiveScope) {
Map<String, ? extends AbstractNode> scopedNodes = getScopedNodes();
AbstractNode node = scopedNodes.containsKey(effectiveScope) ? scopedNodes.get(effectiveScope) : getDefaultNode();
return node.createAttributes(groupId, artifactId, version, scopes, node != this.defaultNode && node != EMPTY_NODE);
}
#location 4
#vulnerability type NULL_DEREFERENCE
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
@PreDestroy
public void destroy() {
boolean isOsWindows = isOsWindows();
if (isOsWindows) {
logger.debug("Initiating removal of windows tray icon (if it exists)");
try {
WindowsTrayIcon.remove();
} catch (Throwable e) {
//An exception might be thrown while shutting down, ignore this
}
}
logger.info("Shutting down");
}
|
#vulnerable code
@PreDestroy
public void destroy() {
String osName = System.getProperty("os.name");
boolean isOsWindows = osName.toLowerCase().contains("windows");
if (isOsWindows) {
logger.debug("Initiating removal of windows tray icon (if it exists)");
try {
WindowsTrayIcon.remove();
} catch (Throwable e) {
//An exception might be thrown while shutting down, ignore this
}
}
logger.info("Shutting down");
}
#location 4
#vulnerability type NULL_DEREFERENCE
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
@PostConstruct
private void addTrayIconIfApplicable() {
boolean isOsWindows = isOsWindows();
if (isOsWindows) {
logger.info("Adding windows system tray icon");
try {
new WindowsTrayIcon();
} catch (HeadlessException e) {
logger.error("Can't add a windows tray icon because running headless");
}
}
}
|
#vulnerable code
@PostConstruct
private void addTrayIconIfApplicable() {
String osName = System.getProperty("os.name");
boolean isOsWindows = osName.toLowerCase().contains("windows");
if (isOsWindows) {
logger.info("Adding windows system tray icon");
try {
new WindowsTrayIcon();
} catch (HeadlessException e) {
logger.error("Can't add a windows tray icon because running headless");
}
}
}
#location 4
#vulnerability type NULL_DEREFERENCE
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
public BaseConfig originalConfig() throws IOException {
String applicationYmlContent;
try (BufferedReader reader = new BufferedReader(new InputStreamReader(BaseConfig.class.getResource("/config/baseConfig.yml").openStream()))) {
applicationYmlContent = reader.lines().collect(Collectors.joining("\n"));
}
return objectMapper.readValue(applicationYmlContent, BaseConfig.class);
}
|
#vulnerable code
public BaseConfig originalConfig() throws IOException {
BufferedReader reader = new BufferedReader(new InputStreamReader(BaseConfig.class.getResource("/config/baseConfig.yml").openStream()));
String applicationYmlContent = reader.lines().collect(Collectors.joining("\n"));
return objectMapper.readValue(applicationYmlContent, BaseConfig.class);
}
#location 3
#vulnerability type RESOURCE_LEAK
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
@Test
public void shouldSetEnabledOnDownloadEvent() {
testee.queueCheckEnabled = false;
testee.lastDownload = null;
testee.onNzbDownloadEvent(new FileDownloadEvent(new FileDownloadEntity(), new SearchResultEntity()));
assertThat(testee.queueCheckEnabled).isTrue();
assertThat(testee.lastDownload).isNotNull();
}
|
#vulnerable code
@Test
public void shouldSetEnabledOnDownloadEvent() {
testee.queueCheckEnabled = false;
testee.lastDownload = null;
testee.onNzbDownloadEvent(new FileDownloadEvent(null, null));
assertThat(testee.queueCheckEnabled).isTrue();
assertThat(testee.lastDownload).isNotNull();
}
#location 6
#vulnerability type NULL_DEREFERENCE
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
@Override
public synchronized Collection<NodeProvisioner.PlannedNode> provision(Label label, int excessWorkload) {
try {
LOGGER.log(Level.INFO, "Asked to provision {0} slave(s) for: {1}", new Object[]{excessWorkload,label});
List<NodeProvisioner.PlannedNode> r = new ArrayList<NodeProvisioner.PlannedNode>();
final List<DockerTemplate> templates = getTemplates(label);
while (excessWorkload > 0 && !templates.isEmpty()) {
final DockerTemplate t = templates.get(0); // get first
LOGGER.log(Level.INFO, "Will provision \"{0}\" for: {1}", new Object[]{t.image,label});
try {
if (!addProvisionedSlave(t)) {
templates.remove(t);
continue;
}
} catch (Exception e) {
LOGGER.log(Level.WARNING, "Bad template {0}: {1}. Trying next template...",
new Object[]{t.image, e.getMessage()});
templates.remove(t);
continue;
}
r.add(new NodeProvisioner.PlannedNode(t.getDisplayName(),
Computer.threadPoolForRemoting.submit(new Callable<Node>() {
public Node call() throws Exception {
// TODO: record the output somewhere
DockerSlave slave = null;
try {
slave = t.provision(new StreamTaskListener(System.out));
final Jenkins jenkins = Jenkins.getInstance();
// TODO once the baseline is 1.592+ switch to Queue.withLock
synchronized (jenkins.getQueue()) {
jenkins.addNode(slave);
}
// Docker instances may have a long init script. If we declare
// the provisioning complete by returning without the connect
// operation, NodeProvisioner may decide that it still wants
// one more instance, because it sees that (1) all the slaves
// are offline (because it's still being launched) and
// (2) there's no capacity provisioned yet.
//
// deferring the completion of provisioning until the launch
// goes successful prevents this problem.
slave.toComputer().connect(false).get();
return slave;
}
catch(Exception ex) {
LOGGER.log(Level.SEVERE, "Error in provisioning; slave=" + slave + ", template=" + t);
ex.printStackTrace();
throw Throwables.propagate(ex);
}
finally {
decrementAmiSlaveProvision(t.image);
}
}
})
,t.getNumExecutors()));
excessWorkload -= t.getNumExecutors();
}
return r;
} catch (Exception e) {
LOGGER.log(Level.SEVERE,"Exception while provisioning for: " + label,e);
return Collections.emptyList();
}
}
|
#vulnerable code
@Override
public synchronized Collection<NodeProvisioner.PlannedNode> provision(Label label, int excessWorkload) {
try {
LOGGER.log(Level.INFO, "Asked to provision {0} slave(s) for: {1}", new Object[]{excessWorkload,label});
List<NodeProvisioner.PlannedNode> r = new ArrayList<NodeProvisioner.PlannedNode>();
final DockerTemplate t = getTemplate(label);
LOGGER.log(Level.INFO, "Will provision \"{0}\" for: {1}", new Object[]{t.image,label});
while (excessWorkload>0) {
if (!addProvisionedSlave(t.image, t.instanceCap)) {
break;
}
r.add(new NodeProvisioner.PlannedNode(t.getDisplayName(),
Computer.threadPoolForRemoting.submit(new Callable<Node>() {
public Node call() throws Exception {
// TODO: record the output somewhere
DockerSlave slave = null;
try {
slave = t.provision(new StreamTaskListener(System.out));
final Jenkins jenkins = Jenkins.getInstance();
// TODO once the baseline is 1.592+ switch to Queue.withLock
synchronized (jenkins.getQueue()) {
jenkins.addNode(slave);
}
// Docker instances may have a long init script. If we declare
// the provisioning complete by returning without the connect
// operation, NodeProvisioner may decide that it still wants
// one more instance, because it sees that (1) all the slaves
// are offline (because it's still being launched) and
// (2) there's no capacity provisioned yet.
//
// deferring the completion of provisioning until the launch
// goes successful prevents this problem.
slave.toComputer().connect(false).get();
return slave;
}
catch(Exception ex) {
LOGGER.log(Level.SEVERE, "Error in provisioning; slave=" + slave + ", template=" + t);
ex.printStackTrace();
throw Throwables.propagate(ex);
}
finally {
decrementAmiSlaveProvision(t.image);
}
}
})
,t.getNumExecutors()));
excessWorkload -= t.getNumExecutors();
}
return r;
} catch (Exception e) {
LOGGER.log(Level.SEVERE,"Exception while provisioning for: " + label,e);
return Collections.emptyList();
}
}
#location 11
#vulnerability type NULL_DEREFERENCE
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
private void pullImage(DockerTemplate dockerTemplate) throws IOException {
final String imageName = dockerTemplate.getDockerTemplateBase().getImage();
if (shouldPullImage(imageName, dockerTemplate.getPullStrategy())) {
LOGGER.info("Pulling image '{}'. This may take awhile...", imageName);
long startTime = System.currentTimeMillis();
PullImageCmd imgCmd = getClient().pullImageCmd(imageName);
final DockerRegistryEndpoint registry = dockerTemplate.getRegistry();
setRegistryAuthentication(imgCmd, registry);
imgCmd.exec(new PullImageResultCallback()).awaitSuccess();
long pullTime = System.currentTimeMillis() - startTime;
LOGGER.info("Finished pulling image '{}', took {} ms", imageName, pullTime);
}
}
|
#vulnerable code
private void pullImage(DockerTemplate dockerTemplate) throws IOException {
final String imageName = dockerTemplate.getDockerTemplateBase().getImage();
if (shouldPullImage(imageName, dockerTemplate.getPullStrategy())) {
LOGGER.info("Pulling image '{}'. This may take awhile...", imageName);
long startTime = System.currentTimeMillis();
PullImageCmd imgCmd = getClient().pullImageCmd(imageName);
final DockerRegistryEndpoint registry = dockerTemplate.getRegistry();
if (registry == null) {
DockerRegistryToken token = registry.getToken(null);
AuthConfig auth = new AuthConfig()
.withRegistryAddress(registry.getUrl())
.withEmail(token.getEmail())
.withRegistrytoken(token.getToken());
imgCmd.withAuthConfig(auth);
}
imgCmd.exec(new PullImageResultCallback()).awaitSuccess();
long pullTime = System.currentTimeMillis() - startTime;
LOGGER.info("Finished pulling image '{}', took {} ms", imageName, pullTime);
}
}
#location 13
#vulnerability type NULL_DEREFERENCE
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
@Test
public void testDecompressionWithZLIB() throws Exception {
// Build a datagram packet.
DatagramPacket gelfMessage = GELFTestHelper.buildZLIBCompressedDatagramPacket(this.originalMessage);
// Let the decompression take place.
SimpleGELFClientHandler handler = new SimpleGELFClientHandler(gelfMessage, "foo");
assertEquals(handler.getClientMessage(), this.originalMessage);
}
|
#vulnerable code
@Test
public void testDecompressionWithZLIB() throws Exception {
// ZLIB compress message.
byte[] compressMe = this.originalMessage.getBytes();
byte[] compressedMessage = new byte[compressMe.length];
Deflater compressor = new Deflater();
compressor.setInput(compressMe);
compressor.finish();
compressor.deflate(compressedMessage);
// Build a datagram packet.
DatagramPacket gelfMessage = new DatagramPacket(compressedMessage, compressedMessage.length);
// Let the decompression take place.
SimpleGELFClientHandler handler = new SimpleGELFClientHandler(gelfMessage, "foo");
assertEquals(handler.getClientMessage(), this.originalMessage);
}
#location 9
#vulnerability type RESOURCE_LEAK
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
@Test
public void testDeleteViewIndexSequences() throws Exception {
createBaseTable(tableName, false, null, null);
Connection conn1 = getConnection();
Connection conn2 = getConnection();
String viewName = schemaName + "." + VIEW_NAME;
conn1.createStatement().execute("CREATE VIEW " + viewName + " AS SELECT * FROM " + tableName);
conn1.createStatement().execute("CREATE INDEX " + indexName + " ON " + viewName + " (v1)");
conn2.createStatement().executeQuery("SELECT * FROM " + tableName).next();
String query = "SELECT sequence_schema, sequence_name, current_value, increment_by FROM SYSTEM.\"SEQUENCE\" WHERE sequence_schema like '%"
+ schemaName + "%'";
ResultSet rs = conn1.prepareStatement(query).executeQuery();
assertTrue(rs.next());
assertEquals(MetaDataUtil.getViewIndexSequenceSchemaName(PNameFactory.newName(tableName), isNamespaceMapped),
rs.getString("sequence_schema"));
assertEquals(MetaDataUtil.getViewIndexSequenceName(PNameFactory.newName(tableName), null, isNamespaceMapped),
rs.getString("sequence_name"));
assertEquals(-32767, rs.getInt("current_value"));
assertEquals(1, rs.getInt("increment_by"));
assertFalse(rs.next());
HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
conn1.createStatement().execute("DROP VIEW " + viewName);
conn1.createStatement().execute("DROP TABLE "+ tableName);
admin = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
assertFalse("View index table should be deleted.", admin.tableExists(TableName.valueOf(viewIndexPhysicalTableName)));
rs = conn2.createStatement().executeQuery("SELECT "
+ PhoenixDatabaseMetaData.SEQUENCE_SCHEMA + ","
+ PhoenixDatabaseMetaData.SEQUENCE_NAME
+ " FROM " + PhoenixDatabaseMetaData.SYSTEM_SEQUENCE);
assertFalse("View index sequences should be deleted.", rs.next());
}
|
#vulnerable code
@Test
public void testDeleteViewIndexSequences() throws Exception {
createBaseTable(tableName, false, null, null);
Connection conn1 = getConnection();
Connection conn2 = getConnection();
conn1.createStatement().execute("CREATE VIEW " + VIEW_NAME + " AS SELECT * FROM " + tableName);
conn1.createStatement().execute("CREATE INDEX " + indexName + " ON " + VIEW_NAME + " (v1)");
conn2.createStatement().executeQuery("SELECT * FROM " + tableName).next();
HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
conn1.createStatement().execute("DROP VIEW " + VIEW_NAME);
conn1.createStatement().execute("DROP TABLE "+ tableName);
admin = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
assertFalse("View index table should be deleted.", admin.tableExists(TableName.valueOf(viewIndexPhysicalTableName)));
ResultSet rs = conn2.createStatement().executeQuery("SELECT "
+ PhoenixDatabaseMetaData.SEQUENCE_SCHEMA + ","
+ PhoenixDatabaseMetaData.SEQUENCE_NAME
+ " FROM " + PhoenixDatabaseMetaData.SYSTEM_SEQUENCE);
assertFalse("View index sequences should be deleted.", rs.next());
}
#location 13
#vulnerability type RESOURCE_LEAK
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
private PTable doGetTable(byte[] tenantId, byte[] schemaName, byte[] tableName,
long clientTimeStamp, RowLock rowLock, int clientVersion, boolean skipAddingIndexes,
boolean skipAddingParentColumns, PTable lockedAncestorTable) throws IOException, SQLException {
Region region = env.getRegion();
final byte[] key = SchemaUtil.getTableKey(tenantId, schemaName, tableName);
// if this region doesn't contain the metadata rows look up the table by using PhoenixRuntime.getTable
if (!region.getRegionInfo().containsRow(key)) {
Properties props = new Properties();
if (tenantId != null) {
props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, Bytes.toString(tenantId));
}
if (clientTimeStamp != HConstants.LATEST_TIMESTAMP) {
props.setProperty("CurrentSCN", Long.toString(clientTimeStamp));
}
try (PhoenixConnection connection =
QueryUtil.getConnectionOnServer(props, env.getConfiguration())
.unwrap(PhoenixConnection.class)) {
ConnectionQueryServices queryServices = connection.getQueryServices();
MetaDataMutationResult result =
queryServices.getTable(PNameFactory.newName(tenantId), schemaName,
tableName, HConstants.LATEST_TIMESTAMP, clientTimeStamp,
skipAddingIndexes, skipAddingParentColumns, lockedAncestorTable);
return result.getTable();
} catch (ClassNotFoundException e) {
}
}
ImmutableBytesPtr cacheKey = new ImmutableBytesPtr(key);
// Ask Lars about the expense of this call - if we don't take the lock, we still won't get
// partial results
// get the co-processor environment
// TODO: check that key is within region.getStartKey() and region.getEndKey()
// and return special code to force client to lookup region from meta.
/*
* Lock directly on key, though it may be an index table. This will just prevent a table
* from getting rebuilt too often.
*/
final boolean wasLocked = (rowLock != null);
try {
if (!wasLocked) {
rowLock = acquireLock(region, key, null);
}
PTable table =
getTableFromCache(cacheKey, clientTimeStamp, clientVersion, skipAddingIndexes,
skipAddingParentColumns, lockedAncestorTable);
table = modifyIndexStateForOldClient(clientVersion, table);
// We only cache the latest, so we'll end up building the table with every call if the
// client connection has specified an SCN.
// TODO: If we indicate to the client that we're returning an older version, but there's a
// newer version available, the client
// can safely not call this, since we only allow modifications to the latest.
if (table != null && table.getTimeStamp() < clientTimeStamp) {
// Table on client is up-to-date with table on server, so just return
if (isTableDeleted(table)) {
return null;
}
return table;
}
// Query for the latest table first, since it's not cached
table =
buildTable(key, cacheKey, region, HConstants.LATEST_TIMESTAMP, clientVersion,
skipAddingIndexes, skipAddingParentColumns, lockedAncestorTable);
if ((table != null && table.getTimeStamp() < clientTimeStamp) ||
(blockWriteRebuildIndex && table.getIndexDisableTimestamp() > 0)) {
return table;
}
// Otherwise, query for an older version of the table - it won't be cached
table =
buildTable(key, cacheKey, region, clientTimeStamp, clientVersion,
skipAddingIndexes, skipAddingParentColumns, lockedAncestorTable);
return table;
} finally {
if (!wasLocked && rowLock!=null) rowLock.release();
}
}
|
#vulnerable code
private PTable doGetTable(byte[] tenantId, byte[] schemaName, byte[] tableName,
long clientTimeStamp, RowLock rowLock, int clientVersion, boolean skipAddingIndexes,
boolean skipAddingParentColumns, PTable lockedAncestorTable) throws IOException, SQLException {
Region region = env.getRegion();
final byte[] key = SchemaUtil.getTableKey(tenantId, schemaName, tableName);
// if this region doesn't contain the metadata rows look up the table by using PhoenixRuntime.getTable
if (!region.getRegionInfo().containsRow(key)) {
Properties props = new Properties();
if (tenantId != null) {
props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, Bytes.toString(tenantId));
}
if (clientTimeStamp != HConstants.LATEST_TIMESTAMP) {
props.setProperty("CurrentSCN", Long.toString(clientTimeStamp));
}
try (PhoenixConnection connection =
QueryUtil.getConnectionOnServer(props, env.getConfiguration())
.unwrap(PhoenixConnection.class)) {
ConnectionQueryServices queryServices = connection.getQueryServices();
MetaDataMutationResult result =
queryServices.getTable(PNameFactory.newName(tenantId), schemaName,
tableName, HConstants.LATEST_TIMESTAMP, clientTimeStamp,
skipAddingIndexes, skipAddingParentColumns, lockedAncestorTable);
return result.getTable();
} catch (ClassNotFoundException e) {
}
}
ImmutableBytesPtr cacheKey = new ImmutableBytesPtr(key);
// Ask Lars about the expense of this call - if we don't take the lock, we still won't get
// partial results
// get the co-processor environment
// TODO: check that key is within region.getStartKey() and region.getEndKey()
// and return special code to force client to lookup region from meta.
/*
* Lock directly on key, though it may be an index table. This will just prevent a table
* from getting rebuilt too often.
*/
final boolean wasLocked = (rowLock != null);
try {
if (!wasLocked) {
rowLock = acquireLock(region, key, null);
}
PTable table =
getTableFromCache(cacheKey, clientTimeStamp, clientVersion, skipAddingIndexes,
skipAddingParentColumns, lockedAncestorTable);
// We only cache the latest, so we'll end up building the table with every call if the
// client connection has specified an SCN.
// TODO: If we indicate to the client that we're returning an older version, but there's a
// newer version available, the client
// can safely not call this, since we only allow modifications to the latest.
if (table != null && table.getTimeStamp() < clientTimeStamp) {
// Table on client is up-to-date with table on server, so just return
if (isTableDeleted(table)) {
return null;
}
return table;
}
// Query for the latest table first, since it's not cached
table =
buildTable(key, cacheKey, region, HConstants.LATEST_TIMESTAMP, clientVersion,
skipAddingIndexes, skipAddingParentColumns, lockedAncestorTable);
if ((table != null && table.getTimeStamp() < clientTimeStamp) ||
(blockWriteRebuildIndex && table.getIndexDisableTimestamp() > 0)) {
return table;
}
// Otherwise, query for an older version of the table - it won't be cached
table =
buildTable(key, cacheKey, region, clientTimeStamp, clientVersion,
skipAddingIndexes, skipAddingParentColumns, lockedAncestorTable);
return table;
} finally {
if (!wasLocked && rowLock!=null) rowLock.release();
}
}
#location 63
#vulnerability type NULL_DEREFERENCE
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
@Test
public void testSystemCatalogWALEntryFilter() throws Exception {
//now create WAL.Entry objects that refer to cells in those view rows in System.Catalog
Get tenantViewGet = getTenantViewGet(catalogTable, TENANT_BYTES, TENANT_VIEW_NAME);
Get nonTenantViewGet = getTenantViewGet(catalogTable,
DEFAULT_TENANT_BYTES, NONTENANT_VIEW_NAME);
Get tenantLinkGet = getParentChildLinkGet(catalogTable, TENANT_BYTES, TENANT_VIEW_NAME);
Get nonTenantLinkGet = getParentChildLinkGet(catalogTable,
DEFAULT_TENANT_BYTES, NONTENANT_VIEW_NAME);
WAL.Entry nonTenantViewEntry = getEntry(systemCatalogTableName, nonTenantViewGet);
WAL.Entry tenantViewEntry = getEntry(systemCatalogTableName, tenantViewGet);
WAL.Entry nonTenantLinkEntry = getEntry(systemCatalogTableName, nonTenantLinkGet);
WAL.Entry tenantLinkEntry = getEntry(systemCatalogTableName, tenantLinkGet);
//verify that the tenant view WAL.Entry passes the filter and the non-tenant view does not
SystemCatalogWALEntryFilter filter = new SystemCatalogWALEntryFilter();
Assert.assertNull(filter.filter(nonTenantViewEntry));
WAL.Entry filteredTenantEntry = filter.filter(tenantViewEntry);
Assert.assertNotNull("Tenant view was filtered when it shouldn't be!", filteredTenantEntry);
Assert.assertEquals(tenantViewEntry.getEdit().size(),
filter.filter(tenantViewEntry).getEdit().size());
//now check that a WAL.Entry with cells from both a tenant and a non-tenant
//catalog row only allow the tenant cells through
WALEdit comboEdit = new WALEdit();
comboEdit.getCells().addAll(nonTenantViewEntry.getEdit().getCells());
comboEdit.getCells().addAll(tenantViewEntry.getEdit().getCells());
WAL.Entry comboEntry = new WAL.Entry(walKey, comboEdit);
Assert.assertEquals(tenantViewEntry.getEdit().size() + nonTenantViewEntry.getEdit().size()
, comboEntry.getEdit().size());
Assert.assertEquals(tenantViewEntry.getEdit().size(),
filter.filter(comboEntry).getEdit().size());
//now check that the parent-child links (which have the tenant_id of the view's parent,
// but are a part of the view's metadata) are migrated in the tenant case
// but not the non-tenant. The view's tenant_id is in th System.Catalog.COLUMN_NAME field
Assert.assertNull("Non-tenant parent-child link was not filtered " +
"when it should be!", filter.filter(nonTenantLinkEntry));
Assert.assertNotNull("Tenant parent-child link was filtered when it should not be!",
filter.filter(tenantLinkEntry));
Assert.assertEquals(tenantLinkEntry.getEdit().size(),
filter.filter(tenantLinkEntry).getEdit().size());
//add the parent-child link to the tenant view WAL entry,
//since they'll usually be together and they both need to
//be replicated
tenantViewEntry.getEdit().getCells().addAll(tenantLinkEntry.getEdit().getCells());
Assert.assertEquals(tenantViewEntry.getEdit().size(), tenantViewEntry.getEdit().size());
}
|
#vulnerable code
@Test
public void testSystemCatalogWALEntryFilter() throws Exception {
//now create WAL.Entry objects that refer to cells in those view rows in System.Catalog
Get tenantGet = getGet(catalogTable, TENANT_BYTES, TENANT_VIEW_NAME);
Get nonTenantGet = getGet(catalogTable, DEFAULT_TENANT_BYTES, NONTENANT_VIEW_NAME);
WAL.Entry nonTenantEntry = getEntry(systemCatalogTableName, nonTenantGet);
WAL.Entry tenantEntry = getEntry(systemCatalogTableName, tenantGet);
//verify that the tenant view WAL.Entry passes the filter and the non-tenant view does not
SystemCatalogWALEntryFilter filter = new SystemCatalogWALEntryFilter();
Assert.assertNull(filter.filter(nonTenantEntry));
WAL.Entry filteredTenantEntry = filter.filter(tenantEntry);
Assert.assertNotNull("Tenant view was filtered when it shouldn't be!", filteredTenantEntry);
Assert.assertEquals(tenantEntry.getEdit().size(),
filter.filter(tenantEntry).getEdit().size());
//now check that a WAL.Entry with cells from both a tenant and a non-tenant
//catalog row only allow the tenant cells through
WALEdit comboEdit = new WALEdit();
comboEdit.getCells().addAll(nonTenantEntry.getEdit().getCells());
comboEdit.getCells().addAll(tenantEntry.getEdit().getCells());
WAL.Entry comboEntry = new WAL.Entry(walKey, comboEdit);
Assert.assertEquals(tenantEntry.getEdit().size() + nonTenantEntry.getEdit().size()
, comboEntry.getEdit().size());
Assert.assertEquals(tenantEntry.getEdit().size(),
filter.filter(comboEntry).getEdit().size());
}
#location 18
#vulnerability type NULL_DEREFERENCE
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
@Test
public void testUpsertSelectSameBatchConcurrently() throws Exception {
try (Connection conn = driver.connect(url, props)) {
int numUpsertSelectRunners = 5;
ExecutorService exec = Executors.newFixedThreadPool(numUpsertSelectRunners);
CompletionService<Boolean> completionService = new ExecutorCompletionService<Boolean>(exec);
List<Future<Boolean>> futures = Lists.newArrayListWithExpectedSize(numUpsertSelectRunners);
// run one UPSERT SELECT for 100 rows (that locks the rows for a long time)
futures.add(completionService.submit(new UpsertSelectRunner(dataTable, 0, 105, 1)));
// run four UPSERT SELECTS for 5 rows (that overlap with slow running UPSERT SELECT)
for (int i = 0; i < 100; i += 25) {
futures.add(completionService.submit(new UpsertSelectRunner(dataTable, i, i+25, 5)));
}
int received = 0;
while (received < futures.size()) {
Future<Boolean> resultFuture = completionService.take();
Boolean result = resultFuture.get();
received++;
assertTrue(result);
}
exec.shutdownNow();
}
}
|
#vulnerable code
@Test
public void testUpsertSelectSameBatchConcurrently() throws Exception {
final String dataTable = generateUniqueName();
final String index = "IDX_" + dataTable;
// create the table and ensure its empty
Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
Connection conn = driver.connect(url, props);
conn.createStatement()
.execute("CREATE TABLE " + dataTable + " (k INTEGER NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR)");
// create the index and ensure its empty as well
conn.createStatement().execute("CREATE INDEX " + index + " ON " + dataTable + " (v1)");
conn = DriverManager.getConnection(getUrl(), props);
PreparedStatement stmt = conn.prepareStatement("UPSERT INTO " + dataTable + " VALUES(?,?,?)");
conn.setAutoCommit(false);
for (int i = 0; i < 100; i++) {
stmt.setInt(1, i);
stmt.setString(2, "v1" + i);
stmt.setString(3, "v2" + i);
stmt.execute();
}
conn.commit();
int numUpsertSelectRunners = 5;
ExecutorService exec = Executors.newFixedThreadPool(numUpsertSelectRunners);
CompletionService<Boolean> completionService = new ExecutorCompletionService<Boolean>(exec);
List<Future<Boolean>> futures = Lists.newArrayListWithExpectedSize(numUpsertSelectRunners);
// run one UPSERT SELECT for 100 rows (that locks the rows for a long time)
futures.add(completionService.submit(new UpsertSelectRunner(dataTable, 0, 105, 1)));
// run four UPSERT SELECTS for 5 rows (that overlap with slow running UPSERT SELECT)
for (int i = 0; i < 100; i += 25) {
futures.add(completionService.submit(new UpsertSelectRunner(dataTable, i, i+25, 5)));
}
int received = 0;
while (received < futures.size()) {
Future<Boolean> resultFuture = completionService.take();
Boolean result = resultFuture.get();
received++;
assertTrue(result);
}
exec.shutdownNow();
conn.close();
}
#location 8
#vulnerability type NULL_DEREFERENCE
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
protected static void setupTxManager() throws SQLException, IOException {
TransactionFactory.getTransactionProvider().getTransactionContext().setupTxManager(config, getUrl());
}
|
#vulnerable code
protected static void setupTxManager() throws SQLException, IOException {
TransactionFactory.getTransactionFactory().getTransactionContext().setupTxManager(config, getUrl());
}
#location 2
#vulnerability type NULL_DEREFERENCE
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
@Test
public void testExternalTxContext() throws Exception {
ResultSet rs;
Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
Connection conn = DriverManager.getConnection(getUrl(), props);
conn.setAutoCommit(false);
String fullTableName = generateUniqueName();
PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
Statement stmt = conn.createStatement();
stmt.execute("CREATE TABLE " + fullTableName + "(K VARCHAR PRIMARY KEY, V1 VARCHAR, V2 VARCHAR) TRANSACTIONAL=true");
Table htable = pconn.getQueryServices().getTable(Bytes.toBytes(fullTableName));
stmt.executeUpdate("upsert into " + fullTableName + " values('x', 'a', 'a')");
conn.commit();
try (Connection newConn = DriverManager.getConnection(getUrl(), props)) {
rs = newConn.createStatement().executeQuery("select count(*) from " + fullTableName);
assertTrue(rs.next());
assertEquals(1,rs.getInt(1));
}
PhoenixTransactionContext txContext =
TransactionFactory.getTransactionProvider().getTransactionContext(pconn);
PhoenixTransactionalTable txTable =
TransactionFactory.getTransactionProvider().getTransactionalTable(txContext, htable);
txContext.begin();
// Use HBase APIs to add a new row
Put put = new Put(Bytes.toBytes("z"));
put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, QueryConstants.EMPTY_COLUMN_VALUE_BYTES);
put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, Bytes.toBytes("V1"), Bytes.toBytes("b"));
txTable.put(put);
// Use Phoenix APIs to add new row (sharing the transaction context)
pconn.setTransactionContext(txContext);
conn.createStatement().executeUpdate("upsert into " + fullTableName + " values('y', 'c', 'c')");
// New connection should not see data as it hasn't been committed yet
try (Connection newConn = DriverManager.getConnection(getUrl(), props)) {
rs = newConn.createStatement().executeQuery("select count(*) from " + fullTableName);
assertTrue(rs.next());
assertEquals(1,rs.getInt(1));
}
// Use new connection to create a row with a conflict
Connection connWithConflict = DriverManager.getConnection(getUrl(), props);
connWithConflict.createStatement().execute("upsert into " + fullTableName + " values('z', 'd', 'd')");
// Existing connection should see data even though it hasn't been committed yet
rs = conn.createStatement().executeQuery("select count(*) from " + fullTableName);
assertTrue(rs.next());
assertEquals(3,rs.getInt(1));
// Use TM APIs directly to finish (i.e. commit) the transaction
txContext.commit();
// Confirm that attempt to commit row with conflict fails
try {
connWithConflict.commit();
fail();
} catch (SQLException e) {
assertEquals(SQLExceptionCode.TRANSACTION_CONFLICT_EXCEPTION.getErrorCode(), e.getErrorCode());
}
// New connection should now see data as it has been committed
try (Connection newConn = DriverManager.getConnection(getUrl(), props)) {
rs = newConn.createStatement().executeQuery("select count(*) from " + fullTableName);
assertTrue(rs.next());
assertEquals(3,rs.getInt(1));
}
// Repeat the same as above, but this time abort the transaction
txContext =
TransactionFactory.getTransactionProvider().getTransactionContext(pconn);
txTable =
TransactionFactory.getTransactionProvider().getTransactionalTable(txContext, htable);
txContext.begin();
// Use HBase APIs to add a new row
put = new Put(Bytes.toBytes("j"));
put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, QueryConstants.EMPTY_COLUMN_VALUE_BYTES);
put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, Bytes.toBytes("V1"), Bytes.toBytes("e"));
txTable.put(put);
// Use Phoenix APIs to add new row (sharing the transaction context)
pconn.setTransactionContext(txContext);
conn.createStatement().executeUpdate("upsert into " + fullTableName + " values('k', 'f', 'f')");
// Existing connection should see data even though it hasn't been committed yet
rs = conn.createStatement().executeQuery("select count(*) from " + fullTableName);
assertTrue(rs.next());
assertEquals(5,rs.getInt(1));
connWithConflict.createStatement().execute("upsert into " + fullTableName + " values('k', 'g', 'g')");
rs = connWithConflict.createStatement().executeQuery("select count(*) from " + fullTableName);
assertTrue(rs.next());
assertEquals(4,rs.getInt(1));
// Use TM APIs directly to abort (i.e. rollback) the transaction
txContext.abort();
rs = conn.createStatement().executeQuery("select count(*) from " + fullTableName);
assertTrue(rs.next());
assertEquals(3,rs.getInt(1));
// Should succeed since conflicting row was aborted
connWithConflict.commit();
// New connection should now see data as it has been committed
try (Connection newConn = DriverManager.getConnection(getUrl(), props)) {
rs = newConn.createStatement().executeQuery("select count(*) from " + fullTableName);
assertTrue(rs.next());
assertEquals(4,rs.getInt(1));
}
// Even using HBase APIs directly, we shouldn't find 'j' since a delete marker would have been
// written to hide it.
Result result = htable.get(new Get(Bytes.toBytes("j")));
assertTrue(result.isEmpty());
}
|
#vulnerable code
@Test
public void testExternalTxContext() throws Exception {
ResultSet rs;
Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
Connection conn = DriverManager.getConnection(getUrl(), props);
conn.setAutoCommit(false);
String fullTableName = generateUniqueName();
PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
Statement stmt = conn.createStatement();
stmt.execute("CREATE TABLE " + fullTableName + "(K VARCHAR PRIMARY KEY, V1 VARCHAR, V2 VARCHAR) TRANSACTIONAL=true");
Table htable = pconn.getQueryServices().getTable(Bytes.toBytes(fullTableName));
stmt.executeUpdate("upsert into " + fullTableName + " values('x', 'a', 'a')");
conn.commit();
try (Connection newConn = DriverManager.getConnection(getUrl(), props)) {
rs = newConn.createStatement().executeQuery("select count(*) from " + fullTableName);
assertTrue(rs.next());
assertEquals(1,rs.getInt(1));
}
PhoenixTransactionContext txContext =
TransactionFactory.getTransactionFactory().getTransactionContext(pconn);
PhoenixTransactionalTable txTable =
TransactionFactory.getTransactionFactory().getTransactionalTable(txContext, htable);
txContext.begin();
// Use HBase APIs to add a new row
Put put = new Put(Bytes.toBytes("z"));
put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, QueryConstants.EMPTY_COLUMN_VALUE_BYTES);
put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, Bytes.toBytes("V1"), Bytes.toBytes("b"));
txTable.put(put);
// Use Phoenix APIs to add new row (sharing the transaction context)
pconn.setTransactionContext(txContext);
conn.createStatement().executeUpdate("upsert into " + fullTableName + " values('y', 'c', 'c')");
// New connection should not see data as it hasn't been committed yet
try (Connection newConn = DriverManager.getConnection(getUrl(), props)) {
rs = newConn.createStatement().executeQuery("select count(*) from " + fullTableName);
assertTrue(rs.next());
assertEquals(1,rs.getInt(1));
}
// Use new connection to create a row with a conflict
Connection connWithConflict = DriverManager.getConnection(getUrl(), props);
connWithConflict.createStatement().execute("upsert into " + fullTableName + " values('z', 'd', 'd')");
// Existing connection should see data even though it hasn't been committed yet
rs = conn.createStatement().executeQuery("select count(*) from " + fullTableName);
assertTrue(rs.next());
assertEquals(3,rs.getInt(1));
// Use TM APIs directly to finish (i.e. commit) the transaction
txContext.commit();
// Confirm that attempt to commit row with conflict fails
try {
connWithConflict.commit();
fail();
} catch (SQLException e) {
assertEquals(SQLExceptionCode.TRANSACTION_CONFLICT_EXCEPTION.getErrorCode(), e.getErrorCode());
}
// New connection should now see data as it has been committed
try (Connection newConn = DriverManager.getConnection(getUrl(), props)) {
rs = newConn.createStatement().executeQuery("select count(*) from " + fullTableName);
assertTrue(rs.next());
assertEquals(3,rs.getInt(1));
}
// Repeat the same as above, but this time abort the transaction
txContext =
TransactionFactory.getTransactionFactory().getTransactionContext(pconn);
txTable =
TransactionFactory.getTransactionFactory().getTransactionalTable(txContext, htable);
txContext.begin();
// Use HBase APIs to add a new row
put = new Put(Bytes.toBytes("j"));
put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, QueryConstants.EMPTY_COLUMN_VALUE_BYTES);
put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, Bytes.toBytes("V1"), Bytes.toBytes("e"));
txTable.put(put);
// Use Phoenix APIs to add new row (sharing the transaction context)
pconn.setTransactionContext(txContext);
conn.createStatement().executeUpdate("upsert into " + fullTableName + " values('k', 'f', 'f')");
// Existing connection should see data even though it hasn't been committed yet
rs = conn.createStatement().executeQuery("select count(*) from " + fullTableName);
assertTrue(rs.next());
assertEquals(5,rs.getInt(1));
connWithConflict.createStatement().execute("upsert into " + fullTableName + " values('k', 'g', 'g')");
rs = connWithConflict.createStatement().executeQuery("select count(*) from " + fullTableName);
assertTrue(rs.next());
assertEquals(4,rs.getInt(1));
// Use TM APIs directly to abort (i.e. rollback) the transaction
txContext.abort();
rs = conn.createStatement().executeQuery("select count(*) from " + fullTableName);
assertTrue(rs.next());
assertEquals(3,rs.getInt(1));
// Should succeed since conflicting row was aborted
connWithConflict.commit();
// New connection should now see data as it has been committed
try (Connection newConn = DriverManager.getConnection(getUrl(), props)) {
rs = newConn.createStatement().executeQuery("select count(*) from " + fullTableName);
assertTrue(rs.next());
assertEquals(4,rs.getInt(1));
}
// Even using HBase APIs directly, we shouldn't find 'j' since a delete marker would have been
// written to hide it.
Result result = htable.get(new Get(Bytes.toBytes("j")));
assertTrue(result.isEmpty());
}
#location 27
#vulnerability type NULL_DEREFERENCE
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
@Test
public void testUpsertDeleteWithNewClient() throws Exception {
// Insert data with old client and read with new client
executeQueriesWithCurrentVersion(CREATE_ADD);
executeQueryWithClientVersion(compatibleClientVersion, QUERY);
assertExpectedOutput(CREATE_ADD, QUERY);
// Deletes with the new client
executeQueriesWithCurrentVersion(ADD_DELETE);
executeQueriesWithCurrentVersion(QUERY_ADD_DELETE);
assertExpectedOutput(ADD_DELETE, QUERY_ADD_DELETE);
}
|
#vulnerable code
@Test
public void testUpsertDeleteWithNewClient() throws Exception {
checkForPreConditions();
// Insert data with old client and read with new client
executeQueriesWithCurrentVersion(CREATE_ADD);
executeQueryWithClientVersion(compatibleClientVersion, QUERY);
assertTrue(compareOutput(CREATE_ADD, QUERY));
// Deletes with the new client
executeQueriesWithCurrentVersion(ADD_DELETE);
executeQueriesWithCurrentVersion(QUERY_ADD_DELETE);
assertTrue(compareOutput(ADD_DELETE, QUERY_ADD_DELETE));
}
#location 3
#vulnerability type THREAD_SAFETY_VIOLATION
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
@Test
public void testIndexRebuildTask() throws Throwable {
String baseTable = generateUniqueName();
String viewName = generateUniqueName();
Connection conn = null;
Connection tenantConn = null;
try {
conn = DriverManager.getConnection(getUrl());
conn.setAutoCommit(false);
Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, TENANT1);
tenantConn =DriverManager.getConnection(getUrl(), props);
String ddlFormat =
"CREATE TABLE IF NOT EXISTS " + baseTable + " ("
+ " %s PK2 VARCHAR NOT NULL, V1 VARCHAR, V2 VARCHAR "
+ " CONSTRAINT NAME_PK PRIMARY KEY (%s PK2)" + " ) %s";
conn.createStatement().execute(generateDDL(ddlFormat));
conn.commit();
// Create a view
String viewDDL = "CREATE VIEW " + viewName + " AS SELECT * FROM " + baseTable;
tenantConn.createStatement().execute(viewDDL);
// Create index
String indexName = generateUniqueName();
String idxSDDL = String.format("CREATE INDEX %s ON %s (V1)", indexName, viewName);
tenantConn.createStatement().execute(idxSDDL);
// Insert rows
int numOfValues = 1000;
for (int i=0; i < numOfValues; i++){
tenantConn.createStatement().execute(
String.format("UPSERT INTO %s VALUES('%s', '%s', '%s')", viewName, String.valueOf(i), "y",
"z"));
}
tenantConn.commit();
waitForIndexRebuild(conn, indexName, PIndexState.ACTIVE);
String viewIndexTableName = MetaDataUtil.getViewIndexPhysicalName(baseTable);
ConnectionQueryServices queryServices = conn.unwrap(PhoenixConnection.class).getQueryServices();
Table indexHTable = queryServices.getTable(Bytes.toBytes(viewIndexTableName));
int count = getUtility().countRows(indexHTable);
assertEquals(numOfValues, count);
// Alter to Unusable makes the index status inactive.
// If I Alter to DISABLE, it fails to in Index tool while setting state to active due to Invalid transition.
tenantConn.createStatement().execute(
String.format("ALTER INDEX %s ON %s UNUSABLE", indexName, viewName));
tenantConn.commit();
// Remove index contents and try again
Admin admin = queryServices.getAdmin();
TableName tableName = TableName.valueOf(viewIndexTableName);
admin.disableTable(tableName);
admin.truncateTable(tableName, false);
count = getUtility().countRows(indexHTable);
assertEquals(0, count);
String data = "{IndexName:" + indexName + ", DisableBefore: true}";
// Run IndexRebuildTask
TaskRegionObserver.SelfHealingTask task =
new TaskRegionObserver.SelfHealingTask(
TaskRegionEnvironment, QueryServicesOptions.DEFAULT_TASK_HANDLING_MAX_INTERVAL_MS);
Timestamp startTs = new Timestamp(EnvironmentEdgeManager.currentTimeMillis());
Task.addTask(conn.unwrap(PhoenixConnection.class), PTable.TaskType.INDEX_REBUILD,
TENANT1, null, viewName,
PTable.TaskStatus.CREATED.toString(), data, null, startTs, null, true);
task.run();
// Check task status and other column values.
waitForTaskState(conn, PTable.TaskType.INDEX_REBUILD, viewName, PTable.TaskStatus.COMPLETED);
// See that index is rebuilt and confirm index has rows
count = getUtility().countRows(indexHTable);
assertEquals(numOfValues, count);
} finally {
if (conn != null) {
conn.createStatement().execute("DELETE " + " FROM " + PhoenixDatabaseMetaData.SYSTEM_TASK_NAME
+ " WHERE TABLE_NAME ='" + viewName + "'");
conn.commit();
conn.close();
}
if (tenantConn != null) {
tenantConn.close();
}
}
}
|
#vulnerable code
@Test
public void testIndexRebuildTask() throws Throwable {
String baseTable = generateUniqueName();
Connection conn = null;
Connection viewConn = null;
try {
conn = DriverManager.getConnection(getUrl());
conn.setAutoCommit(false);
Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, TENANT1);
viewConn =DriverManager.getConnection(getUrl(), props);
String ddlFormat =
"CREATE TABLE IF NOT EXISTS " + baseTable + " ("
+ " %s PK2 VARCHAR NOT NULL, V1 VARCHAR, V2 VARCHAR "
+ " CONSTRAINT NAME_PK PRIMARY KEY (%s PK2)" + " ) %s";
conn.createStatement().execute(generateDDL(ddlFormat));
conn.commit();
// Create a view
String viewName = generateUniqueName();
String viewDDL = "CREATE VIEW " + viewName + " AS SELECT * FROM " + baseTable;
viewConn.createStatement().execute(viewDDL);
// Create index
String indexName = generateUniqueName();
String idxSDDL = String.format("CREATE INDEX %s ON %s (V1)", indexName, viewName);
viewConn.createStatement().execute(idxSDDL);
// Insert rows
int numOfValues = 1000;
for (int i=0; i < numOfValues; i++){
viewConn.createStatement().execute(
String.format("UPSERT INTO %s VALUES('%s', '%s', '%s')", viewName, String.valueOf(i), "y",
"z"));
}
viewConn.commit();
String data = "{IndexName:" + indexName + "}";
// Run IndexRebuildTask
TaskRegionObserver.SelfHealingTask task =
new TaskRegionObserver.SelfHealingTask(
TaskRegionEnvironment, QueryServicesOptions.DEFAULT_TASK_HANDLING_MAX_INTERVAL_MS);
Timestamp startTs = new Timestamp(EnvironmentEdgeManager.currentTimeMillis());
// Add a task to System.Task to build indexes
Task.addTask(conn.unwrap(PhoenixConnection.class), PTable.TaskType.INDEX_REBUILD,
TENANT1, null, viewName,
PTable.TaskStatus.CREATED.toString(), data, null, startTs, null, true);
task.run();
String viewIndexTableName = MetaDataUtil.getViewIndexPhysicalName(baseTable);
ConnectionQueryServices queryServices = conn.unwrap(PhoenixConnection.class).getQueryServices();
int count = getUtility().countRows(queryServices.getTable(Bytes.toBytes(viewIndexTableName)));
assertTrue(count == numOfValues);
// Remove index contents and try again
Admin admin = queryServices.getAdmin();
TableName tableName = TableName.valueOf(viewIndexTableName);
admin.disableTable(tableName);
admin.truncateTable(tableName, false);
data = "{IndexName:" + indexName + ", DisableBefore:true}";
// Add a new task (update status to created) to System.Task to rebuild indexes
Task.addTask(conn.unwrap(PhoenixConnection.class), PTable.TaskType.INDEX_REBUILD,
TENANT1, null, viewName,
PTable.TaskStatus.CREATED.toString(), data, null, startTs, null, true);
task.run();
Table systemHTable= queryServices.getTable(Bytes.toBytes("SYSTEM."+PhoenixDatabaseMetaData.SYSTEM_TASK_TABLE));
count = getUtility().countRows(systemHTable);
assertEquals(1, count);
// Check task status and other column values.
waitForTaskState(conn, PTable.TaskType.INDEX_REBUILD, PTable.TaskStatus.COMPLETED);
// See that index is rebuilt and confirm index has rows
Table htable= queryServices.getTable(Bytes.toBytes(viewIndexTableName));
count = getUtility().countRows(htable);
assertEquals(numOfValues, count);
} finally {
conn.createStatement().execute("DELETE " + " FROM " + PhoenixDatabaseMetaData.SYSTEM_TASK_NAME);
conn.commit();
if (conn != null) {
conn.close();
}
if (viewConn != null) {
viewConn.close();
}
}
}
#location 86
#vulnerability type NULL_DEREFERENCE
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
@Override
public void modifyTable(byte[] tableName, HTableDescriptor newDesc) throws IOException,
InterruptedException, TimeoutException {
try (HBaseAdmin admin = new HBaseAdmin(config)) {
if (!allowOnlineTableSchemaUpdate()) {
admin.disableTable(tableName);
admin.modifyTable(tableName, newDesc);
admin.enableTable(tableName);
} else {
admin.modifyTable(tableName, newDesc);
pollForUpdatedTableDescriptor(admin, newDesc, tableName);
}
}
}
|
#vulnerable code
@Override
public void modifyTable(byte[] tableName, HTableDescriptor newDesc) throws IOException,
InterruptedException, TimeoutException {
HBaseAdmin admin = new HBaseAdmin(config);
if (!allowOnlineTableSchemaUpdate()) {
admin.disableTable(tableName);
admin.modifyTable(tableName, newDesc);
admin.enableTable(tableName);
} else {
admin.modifyTable(tableName, newDesc);
pollForUpdatedTableDescriptor(admin, newDesc, tableName);
}
}
#location 8
#vulnerability type RESOURCE_LEAK
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
@Override
protected void reduce(TableRowkeyPair key, Iterable<ImmutableBytesWritable> values,
Reducer<TableRowkeyPair, ImmutableBytesWritable, TableRowkeyPair, KeyValue>.Context context)
throws IOException, InterruptedException {
TreeSet<KeyValue> map = new TreeSet<KeyValue>(KeyValue.COMPARATOR);
ImmutableBytesWritable rowKey = key.getRowkey();
for (ImmutableBytesWritable aggregatedArray : values) {
DataInputStream input = new DataInputStream(new ByteArrayInputStream(aggregatedArray.get()));
while (input.available() != 0) {
byte type = input.readByte();
int index = WritableUtils.readVInt(input);
ImmutableBytesWritable family;
ImmutableBytesWritable name;
ImmutableBytesWritable value = QueryConstants.EMPTY_COLUMN_VALUE_BYTES_PTR;
if (index == -1) {
family = emptyFamilyName.get(key.getTableName());
name = QueryConstants.EMPTY_COLUMN_BYTES_PTR;
} else {
Pair<byte[], byte[]> pair = columnIndexes.get(index);
if(pair.getFirst() != null) {
family = new ImmutableBytesWritable(pair.getFirst());
} else {
family = emptyFamilyName.get(key.getTableName());
}
name = new ImmutableBytesWritable(pair.getSecond());
}
int len = WritableUtils.readVInt(input);
if (len > 0) {
byte[] array = new byte[len];
input.read(array);
value = new ImmutableBytesWritable(array);
}
KeyValue kv;
KeyValue.Type kvType = KeyValue.Type.codeToType(type);
switch (kvType) {
case Put: // not null value
kv = builder.buildPut(key.getRowkey(), family, name, value);
break;
case DeleteColumn: // null value
kv = builder.buildDeleteColumns(key.getRowkey(), family, name);
break;
default:
throw new IOException("Unsupported KeyValue type " + kvType);
}
map.add(kv);
}
Closeables.closeQuietly(input);
}
context.setStatus("Read " + map.getClass());
int index = 0;
for (KeyValue kv : map) {
context.write(key, kv);
if (++index % 100 == 0) context.setStatus("Wrote " + index);
}
}
|
#vulnerable code
@Override
protected void reduce(TableRowkeyPair key, Iterable<ImmutableBytesWritable> values,
Reducer<TableRowkeyPair, ImmutableBytesWritable, TableRowkeyPair, KeyValue>.Context context)
throws IOException, InterruptedException {
TreeSet<KeyValue> map = new TreeSet<KeyValue>(KeyValue.COMPARATOR);
int tableIndex = tableNames.indexOf(key.getTableName());
List<Pair<byte[], byte[]>> columns = columnIndexes.get(tableIndex);
for (ImmutableBytesWritable aggregatedArray : values) {
DataInputStream input = new DataInputStream(new ByteArrayInputStream(aggregatedArray.get()));
while (input.available() != 0) {
int index = WritableUtils.readVInt(input);
Pair<byte[], byte[]> pair = columns.get(index);
byte type = input.readByte();
ImmutableBytesWritable value = null;
int len = WritableUtils.readVInt(input);
if (len > 0) {
byte[] array = new byte[len];
input.read(array);
value = new ImmutableBytesWritable(array);
}
KeyValue kv;
KeyValue.Type kvType = KeyValue.Type.codeToType(type);
switch (kvType) {
case Put: // not null value
kv = builder.buildPut(key.getRowkey(),
new ImmutableBytesWritable(pair.getFirst()),
new ImmutableBytesWritable(pair.getSecond()), value);
break;
case DeleteColumn: // null value
kv = builder.buildDeleteColumns(key.getRowkey(),
new ImmutableBytesWritable(pair.getFirst()),
new ImmutableBytesWritable(pair.getSecond()));
break;
default:
throw new IOException("Unsupported KeyValue type " + kvType);
}
map.add(kv);
}
KeyValue empty = builder.buildPut(key.getRowkey(),
emptyFamilyName.get(tableIndex),
QueryConstants.EMPTY_COLUMN_BYTES_PTR, ByteUtil.EMPTY_BYTE_ARRAY_PTR);
map.add(empty);
Closeables.closeQuietly(input);
}
context.setStatus("Read " + map.getClass());
int index = 0;
for (KeyValue kv : map) {
context.write(key, kv);
if (++index % 100 == 0) context.setStatus("Wrote " + index);
}
}
#location 20
#vulnerability type RESOURCE_LEAK
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
private DeleteType getDeleteTypeOrNull(Collection<? extends Cell> pendingUpdates, int nCFs) {
int nDeleteCF = 0;
int nDeleteVersionCF = 0;
for (Cell kv : pendingUpdates) {
if (kv.getTypeByte() == KeyValue.Type.DeleteFamilyVersion.getCode()) {
nDeleteVersionCF++;
}
else if (kv.getTypeByte() == KeyValue.Type.DeleteFamily.getCode()
// Since we don't include the index rows in the change set for txn tables, we need to detect row deletes that have transformed by TransactionProcessor
|| (CellUtil.matchingQualifier(kv, TransactionFactory.getTransactionProvider().getTransactionContext().getFamilyDeleteMarker()) && CellUtil.matchingValue(kv, HConstants.EMPTY_BYTE_ARRAY))) {
nDeleteCF++;
}
}
// This is what a delete looks like on the server side for mutable indexing...
// Should all be one or the other for DeleteFamily versus DeleteFamilyVersion, but just in case not
DeleteType deleteType = null;
if (nDeleteVersionCF > 0 && nDeleteVersionCF >= nCFs) {
deleteType = DeleteType.SINGLE_VERSION;
} else {
int nDelete = nDeleteCF + nDeleteVersionCF;
if (nDelete>0 && nDelete >= nCFs) {
deleteType = DeleteType.ALL_VERSIONS;
}
}
return deleteType;
}
|
#vulnerable code
private DeleteType getDeleteTypeOrNull(Collection<? extends Cell> pendingUpdates, int nCFs) {
int nDeleteCF = 0;
int nDeleteVersionCF = 0;
for (Cell kv : pendingUpdates) {
if (kv.getTypeByte() == KeyValue.Type.DeleteFamilyVersion.getCode()) {
nDeleteVersionCF++;
}
else if (kv.getTypeByte() == KeyValue.Type.DeleteFamily.getCode()
// Since we don't include the index rows in the change set for txn tables, we need to detect row deletes that have transformed by TransactionProcessor
|| (CellUtil.matchingQualifier(kv, TransactionFactory.getTransactionFactory().getTransactionContext().getFamilyDeleteMarker()) && CellUtil.matchingValue(kv, HConstants.EMPTY_BYTE_ARRAY))) {
nDeleteCF++;
}
}
// This is what a delete looks like on the server side for mutable indexing...
// Should all be one or the other for DeleteFamily versus DeleteFamilyVersion, but just in case not
DeleteType deleteType = null;
if (nDeleteVersionCF > 0 && nDeleteVersionCF >= nCFs) {
deleteType = DeleteType.SINGLE_VERSION;
} else {
int nDelete = nDeleteCF + nDeleteVersionCF;
if (nDelete>0 && nDelete >= nCFs) {
deleteType = DeleteType.ALL_VERSIONS;
}
}
return deleteType;
}
#location 10
#vulnerability type NULL_DEREFERENCE
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
@Test
public void testIndexQos() throws Exception {
Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
Connection conn = driver.connect(getUrl(), props);
try {
// create the table
createTable(conn, dataTableFullName);
// create the index
createIndex(conn, indexName);
ensureTablesOnDifferentRegionServers(dataTableFullName, indexTableFullName);
upsertRow(conn, dataTableFullName);
// run select query that should use the index
String selectSql = "SELECT k, v2 from " + dataTableFullName + " WHERE v1=?";
PreparedStatement stmt = conn.prepareStatement(selectSql);
stmt.setString(1, "v1");
// verify that the query does a range scan on the index table
ResultSet rs = stmt.executeQuery("EXPLAIN " + selectSql);
assertEquals("CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + indexTableFullName + " ['v1']", QueryUtil.getExplainPlan(rs));
// verify that the correct results are returned
rs = stmt.executeQuery();
assertTrue(rs.next());
assertEquals("k1", rs.getString(1));
assertEquals("v2", rs.getString(2));
assertFalse(rs.next());
// drop index table
conn.createStatement().execute(
"DROP INDEX " + indexName + " ON " + dataTableFullName );
// create a data table with the same name as the index table
createTable(conn, indexTableFullName);
// upsert one row to the table (which has the same table name as the previous index table)
upsertRow(conn, indexTableFullName);
// run select query on the new table
selectSql = "SELECT k, v2 from " + indexTableFullName + " WHERE v1=?";
stmt = conn.prepareStatement(selectSql);
stmt.setString(1, "v1");
// verify that the correct results are returned
rs = stmt.executeQuery();
assertTrue(rs.next());
assertEquals("k1", rs.getString(1));
assertEquals("v2", rs.getString(2));
assertFalse(rs.next());
// verify that that index queue is used only once (for the first upsert)
Mockito.verify(TestPhoenixIndexRpcSchedulerFactory.getIndexRpcExecutor()).dispatch(Mockito.any(CallRunner.class));
TestPhoenixIndexRpcSchedulerFactory.reset();
createIndex(conn, indexName + "_1");
// verify that that index queue is used and only once (during Upsert Select on server to build the index)
Mockito.verify(TestPhoenixIndexRpcSchedulerFactory.getIndexRpcExecutor()).dispatch(Mockito.any(CallRunner.class));
}
finally {
conn.close();
}
}
|
#vulnerable code
@Test
public void testIndexQos() throws Exception {
Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
Connection conn = driver.connect(getUrl(), props);
try {
// create the table
conn.createStatement().execute(
"CREATE TABLE " + dataTableFullName + " (k VARCHAR NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR)");
// create the index
conn.createStatement().execute(
"CREATE INDEX " + indexName + " ON " + dataTableFullName + " (v1) INCLUDE (v2)");
ensureTablesOnDifferentRegionServers(dataTableFullName, indexTableFullName);
PreparedStatement stmt = conn.prepareStatement("UPSERT INTO " + dataTableFullName + " VALUES(?,?,?)");
stmt.setString(1, "k1");
stmt.setString(2, "v1");
stmt.setString(3, "v2");
stmt.execute();
conn.commit();
// run select query that should use the index
String selectSql = "SELECT k, v2 from " + dataTableFullName + " WHERE v1=?";
stmt = conn.prepareStatement(selectSql);
stmt.setString(1, "v1");
// verify that the query does a range scan on the index table
ResultSet rs = stmt.executeQuery("EXPLAIN " + selectSql);
assertEquals("CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + indexTableFullName + " ['v1']", QueryUtil.getExplainPlan(rs));
// verify that the correct results are returned
rs = stmt.executeQuery();
assertTrue(rs.next());
assertEquals("k1", rs.getString(1));
assertEquals("v2", rs.getString(2));
assertFalse(rs.next());
// drop index table
conn.createStatement().execute(
"DROP INDEX " + indexName + " ON " + dataTableFullName );
// create a data table with the same name as the index table
conn.createStatement().execute(
"CREATE TABLE " + indexTableFullName + " (k VARCHAR NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR)");
// upsert one row to the table (which has the same table name as the previous index table)
stmt = conn.prepareStatement("UPSERT INTO " + indexTableFullName + " VALUES(?,?,?)");
stmt.setString(1, "k1");
stmt.setString(2, "v1");
stmt.setString(3, "v2");
stmt.execute();
conn.commit();
// run select query on the new table
selectSql = "SELECT k, v2 from " + indexTableFullName + " WHERE v1=?";
stmt = conn.prepareStatement(selectSql);
stmt.setString(1, "v1");
// verify that the correct results are returned
rs = stmt.executeQuery();
assertTrue(rs.next());
assertEquals("k1", rs.getString(1));
assertEquals("v2", rs.getString(2));
assertFalse(rs.next());
// verify that that index queue is used only once (for the first upsert)
Mockito.verify(TestPhoenixIndexRpcSchedulerFactory.getIndexRpcExecutor()).dispatch(Mockito.any(CallRunner.class));
TestPhoenixIndexRpcSchedulerFactory.reset();
conn.createStatement().execute(
"CREATE INDEX " + indexName + "_1 ON " + dataTableFullName + " (v1) INCLUDE (v2)");
// verify that that index queue is used and only once (during Upsert Select on server to build the index)
Mockito.verify(TestPhoenixIndexRpcSchedulerFactory.getIndexRpcExecutor()).dispatch(Mockito.any(CallRunner.class));
}
finally {
conn.close();
}
}
#location 7
#vulnerability type NULL_DEREFERENCE
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
@Override
protected RegionScanner doPostScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c, final Scan scan, final RegionScanner s) throws IOException {
RegionCoprocessorEnvironment env = c.getEnvironment();
Region region = env.getRegion();
long ts = scan.getTimeRange().getMax();
boolean localIndexScan = ScanUtil.isLocalIndex(scan);
if (ScanUtil.isAnalyzeTable(scan)) {
byte[] gp_width_bytes =
scan.getAttribute(BaseScannerRegionObserver.GUIDEPOST_WIDTH_BYTES);
byte[] gp_per_region_bytes =
scan.getAttribute(BaseScannerRegionObserver.GUIDEPOST_PER_REGION);
// Let this throw, as this scan is being done for the sole purpose of collecting stats
StatisticsCollector statsCollector = StatisticsCollectorFactory.createStatisticsCollector(
env, region.getRegionInfo().getTable().getNameAsString(), ts,
gp_width_bytes, gp_per_region_bytes);
return collectStats(s, statsCollector, region, scan, env.getConfiguration());
}
int offsetToBe = 0;
if (localIndexScan) {
/*
* For local indexes, we need to set an offset on row key expressions to skip
* the region start key.
*/
offsetToBe = region.getRegionInfo().getStartKey().length != 0 ? region.getRegionInfo().getStartKey().length :
region.getRegionInfo().getEndKey().length;
ScanUtil.setRowKeyOffset(scan, offsetToBe);
}
final int offset = offsetToBe;
PTable projectedTable = null;
PTable writeToTable = null;
byte[][] values = null;
byte[] descRowKeyTableBytes = scan.getAttribute(UPGRADE_DESC_ROW_KEY);
boolean isDescRowKeyOrderUpgrade = descRowKeyTableBytes != null;
if (isDescRowKeyOrderUpgrade) {
logger.debug("Upgrading row key for " + region.getRegionInfo().getTable().getNameAsString());
projectedTable = deserializeTable(descRowKeyTableBytes);
try {
writeToTable = PTableImpl.makePTable(projectedTable, true);
} catch (SQLException e) {
ServerUtil.throwIOException("Upgrade failed", e); // Impossible
}
values = new byte[projectedTable.getPKColumns().size()][];
}
byte[] localIndexBytes = scan.getAttribute(LOCAL_INDEX_BUILD);
List<IndexMaintainer> indexMaintainers = localIndexBytes == null ? null : IndexMaintainer.deserialize(localIndexBytes);
List<Mutation> indexMutations = localIndexBytes == null ? Collections.<Mutation>emptyList() : Lists.<Mutation>newArrayListWithExpectedSize(1024);
RegionScanner theScanner = s;
byte[] indexUUID = scan.getAttribute(PhoenixIndexCodec.INDEX_UUID);
List<Expression> selectExpressions = null;
byte[] upsertSelectTable = scan.getAttribute(BaseScannerRegionObserver.UPSERT_SELECT_TABLE);
boolean isUpsert = false;
boolean isDelete = false;
byte[] deleteCQ = null;
byte[] deleteCF = null;
byte[] emptyCF = null;
ImmutableBytesWritable ptr = new ImmutableBytesWritable();
if (upsertSelectTable != null) {
isUpsert = true;
projectedTable = deserializeTable(upsertSelectTable);
selectExpressions = deserializeExpressions(scan.getAttribute(BaseScannerRegionObserver.UPSERT_SELECT_EXPRS));
values = new byte[projectedTable.getPKColumns().size()][];
} else {
byte[] isDeleteAgg = scan.getAttribute(BaseScannerRegionObserver.DELETE_AGG);
isDelete = isDeleteAgg != null && Bytes.compareTo(PDataType.TRUE_BYTES, isDeleteAgg) == 0;
if (!isDelete) {
deleteCF = scan.getAttribute(BaseScannerRegionObserver.DELETE_CF);
deleteCQ = scan.getAttribute(BaseScannerRegionObserver.DELETE_CQ);
}
emptyCF = scan.getAttribute(BaseScannerRegionObserver.EMPTY_CF);
}
TupleProjector tupleProjector = null;
byte[][] viewConstants = null;
ColumnReference[] dataColumns = IndexUtil.deserializeDataTableColumnsToJoin(scan);
final TupleProjector p = TupleProjector.deserializeProjectorFromScan(scan);
final HashJoinInfo j = HashJoinInfo.deserializeHashJoinFromScan(scan);
if ((localIndexScan && !isDelete && !isDescRowKeyOrderUpgrade) || (j == null && p != null)) {
if (dataColumns != null) {
tupleProjector = IndexUtil.getTupleProjector(scan, dataColumns);
viewConstants = IndexUtil.deserializeViewConstantsFromScan(scan);
}
ImmutableBytesWritable tempPtr = new ImmutableBytesWritable();
theScanner =
getWrappedScanner(c, theScanner, offset, scan, dataColumns, tupleProjector,
region, indexMaintainers == null ? null : indexMaintainers.get(0), viewConstants, p, tempPtr);
}
if (j != null) {
theScanner = new HashJoinRegionScanner(theScanner, p, j, ScanUtil.getTenantId(scan), env);
}
int batchSize = 0;
List<Mutation> mutations = Collections.emptyList();
boolean needToWrite = false;
Configuration conf = c.getEnvironment().getConfiguration();
long flushSize = region.getTableDesc().getMemStoreFlushSize();
if (flushSize <= 0) {
flushSize = conf.getLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE,
HTableDescriptor.DEFAULT_MEMSTORE_FLUSH_SIZE);
}
/**
* Slow down the writes if the memstore size more than
* (hbase.hregion.memstore.block.multiplier - 1) times hbase.hregion.memstore.flush.size
* bytes. This avoids flush storm to hdfs for cases like index building where reads and
* write happen to all the table regions in the server.
*/
final long blockingMemStoreSize = flushSize * (
conf.getLong(HConstants.HREGION_MEMSTORE_BLOCK_MULTIPLIER,
HConstants.DEFAULT_HREGION_MEMSTORE_BLOCK_MULTIPLIER)-1) ;
boolean buildLocalIndex = indexMaintainers != null && dataColumns==null && !localIndexScan;
if (isDescRowKeyOrderUpgrade || isDelete || isUpsert || (deleteCQ != null && deleteCF != null) || emptyCF != null || buildLocalIndex) {
needToWrite = true;
// TODO: size better
mutations = Lists.newArrayListWithExpectedSize(1024);
batchSize = env.getConfiguration().getInt(MUTATE_BATCH_SIZE_ATTRIB, QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE);
}
Aggregators aggregators = ServerAggregators.deserialize(
scan.getAttribute(BaseScannerRegionObserver.AGGREGATORS), env.getConfiguration());
Aggregator[] rowAggregators = aggregators.getAggregators();
boolean hasMore;
boolean hasAny = false;
MultiKeyValueTuple result = new MultiKeyValueTuple();
if (logger.isDebugEnabled()) {
logger.debug(LogUtil.addCustomAnnotations("Starting ungrouped coprocessor scan " + scan + " "+region.getRegionInfo(), ScanUtil.getCustomAnnotations(scan)));
}
long rowCount = 0;
final RegionScanner innerScanner = theScanner;
boolean acquiredLock = false;
try {
if(needToWrite) {
synchronized (lock) {
scansReferenceCount++;
}
}
region.startRegionOperation();
acquiredLock = true;
synchronized (innerScanner) {
do {
List<Cell> results = new ArrayList<Cell>();
// Results are potentially returned even when the return value of s.next is false
// since this is an indication of whether or not there are more values after the
// ones returned
hasMore = innerScanner.nextRaw(results);
if (!results.isEmpty()) {
rowCount++;
result.setKeyValues(results);
try {
if (isDescRowKeyOrderUpgrade) {
Arrays.fill(values, null);
Cell firstKV = results.get(0);
RowKeySchema schema = projectedTable.getRowKeySchema();
int maxOffset = schema.iterator(firstKV.getRowArray(), firstKV.getRowOffset() + offset, firstKV.getRowLength(), ptr);
for (int i = 0; i < schema.getFieldCount(); i++) {
Boolean hasValue = schema.next(ptr, i, maxOffset);
if (hasValue == null) {
break;
}
Field field = schema.getField(i);
if (field.getSortOrder() == SortOrder.DESC) {
// Special case for re-writing DESC ARRAY, as the actual byte value needs to change in this case
if (field.getDataType().isArrayType()) {
field.getDataType().coerceBytes(ptr, null, field.getDataType(),
field.getMaxLength(), field.getScale(), field.getSortOrder(),
field.getMaxLength(), field.getScale(), field.getSortOrder(), true); // force to use correct separator byte
}
// Special case for re-writing DESC CHAR or DESC BINARY, to force the re-writing of trailing space characters
else if (field.getDataType() == PChar.INSTANCE || field.getDataType() == PBinary.INSTANCE) {
int len = ptr.getLength();
while (len > 0 && ptr.get()[ptr.getOffset() + len - 1] == StringUtil.SPACE_UTF8) {
len--;
}
ptr.set(ptr.get(), ptr.getOffset(), len);
// Special case for re-writing DESC FLOAT and DOUBLE, as they're not inverted like they should be (PHOENIX-2171)
} else if (field.getDataType() == PFloat.INSTANCE || field.getDataType() == PDouble.INSTANCE) {
byte[] invertedBytes = SortOrder.invert(ptr.get(), ptr.getOffset(), ptr.getLength());
ptr.set(invertedBytes);
}
} else if (field.getDataType() == PBinary.INSTANCE) {
// Remove trailing space characters so that the setValues call below will replace them
// with the correct zero byte character. Note this is somewhat dangerous as these
// could be legit, but I don't know what the alternative is.
int len = ptr.getLength();
while (len > 0 && ptr.get()[ptr.getOffset() + len - 1] == StringUtil.SPACE_UTF8) {
len--;
}
ptr.set(ptr.get(), ptr.getOffset(), len);
}
values[i] = ptr.copyBytes();
}
writeToTable.newKey(ptr, values);
if (Bytes.compareTo(
firstKV.getRowArray(), firstKV.getRowOffset() + offset, firstKV.getRowLength(),
ptr.get(),ptr.getOffset() + offset,ptr.getLength()) == 0) {
continue;
}
byte[] newRow = ByteUtil.copyKeyBytesIfNecessary(ptr);
if (offset > 0) { // for local indexes (prepend region start key)
byte[] newRowWithOffset = new byte[offset + newRow.length];
System.arraycopy(firstKV.getRowArray(), firstKV.getRowOffset(), newRowWithOffset, 0, offset);;
System.arraycopy(newRow, 0, newRowWithOffset, offset, newRow.length);
newRow = newRowWithOffset;
}
byte[] oldRow = Bytes.copy(firstKV.getRowArray(), firstKV.getRowOffset(), firstKV.getRowLength());
for (Cell cell : results) {
// Copy existing cell but with new row key
Cell newCell = new KeyValue(newRow, 0, newRow.length,
cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength(),
cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength(),
cell.getTimestamp(), KeyValue.Type.codeToType(cell.getTypeByte()),
cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
switch (KeyValue.Type.codeToType(cell.getTypeByte())) {
case Put:
// If Put, point delete old Put
Delete del = new Delete(oldRow);
del.addDeleteMarker(new KeyValue(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(),
cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength(),
cell.getQualifierArray(), cell.getQualifierOffset(),
cell.getQualifierLength(), cell.getTimestamp(), KeyValue.Type.Delete,
ByteUtil.EMPTY_BYTE_ARRAY, 0, 0));
mutations.add(del);
Put put = new Put(newRow);
put.add(newCell);
mutations.add(put);
break;
case Delete:
case DeleteColumn:
case DeleteFamily:
case DeleteFamilyVersion:
Delete delete = new Delete(newRow);
delete.addDeleteMarker(newCell);
mutations.add(delete);
break;
}
}
} else if (buildLocalIndex) {
for (IndexMaintainer maintainer : indexMaintainers) {
if (!results.isEmpty()) {
result.getKey(ptr);
ValueGetter valueGetter =
maintainer.createGetterFromKeyValues(
ImmutableBytesPtr.copyBytesIfNecessary(ptr),
results);
Put put = maintainer.buildUpdateMutation(kvBuilder,
valueGetter, ptr, results.get(0).getTimestamp(),
env.getRegion().getRegionInfo().getStartKey(),
env.getRegion().getRegionInfo().getEndKey());
indexMutations.add(put);
}
}
result.setKeyValues(results);
} else if (isDelete) {
// FIXME: the version of the Delete constructor without the lock
// args was introduced in 0.94.4, thus if we try to use it here
// we can no longer use the 0.94.2 version of the client.
Cell firstKV = results.get(0);
Delete delete = new Delete(firstKV.getRowArray(),
firstKV.getRowOffset(), firstKV.getRowLength(),ts);
mutations.add(delete);
                            // force tephra to ignore these deletes
delete.setAttribute(TxConstants.TX_ROLLBACK_ATTRIBUTE_KEY, new byte[0]);
} else if (isUpsert) {
Arrays.fill(values, null);
int i = 0;
List<PColumn> projectedColumns = projectedTable.getColumns();
for (; i < projectedTable.getPKColumns().size(); i++) {
Expression expression = selectExpressions.get(i);
if (expression.evaluate(result, ptr)) {
values[i] = ptr.copyBytes();
// If SortOrder from expression in SELECT doesn't match the
// column being projected into then invert the bits.
if (expression.getSortOrder() !=
projectedColumns.get(i).getSortOrder()) {
SortOrder.invert(values[i], 0, values[i], 0,
values[i].length);
}
}
}
projectedTable.newKey(ptr, values);
PRow row = projectedTable.newRow(kvBuilder, ts, ptr);
for (; i < projectedColumns.size(); i++) {
Expression expression = selectExpressions.get(i);
if (expression.evaluate(result, ptr)) {
PColumn column = projectedColumns.get(i);
Object value = expression.getDataType()
.toObject(ptr, column.getSortOrder());
// We are guaranteed that the two column will have the
// same type.
if (!column.getDataType().isSizeCompatible(ptr, value,
column.getDataType(), expression.getMaxLength(),
expression.getScale(), column.getMaxLength(),
column.getScale())) {
throw new DataExceedsCapacityException(
column.getDataType(), column.getMaxLength(),
column.getScale());
}
column.getDataType().coerceBytes(ptr, value,
expression.getDataType(), expression.getMaxLength(),
expression.getScale(), expression.getSortOrder(),
column.getMaxLength(), column.getScale(),
column.getSortOrder(), projectedTable.rowKeyOrderOptimizable());
byte[] bytes = ByteUtil.copyKeyBytesIfNecessary(ptr);
row.setValue(column, bytes);
}
}
for (Mutation mutation : row.toRowMutations()) {
mutations.add(mutation);
}
for (i = 0; i < selectExpressions.size(); i++) {
selectExpressions.get(i).reset();
}
} else if (deleteCF != null && deleteCQ != null) {
// No need to search for delete column, since we project only it
// if no empty key value is being set
if (emptyCF == null ||
result.getValue(deleteCF, deleteCQ) != null) {
Delete delete = new Delete(results.get(0).getRowArray(),
results.get(0).getRowOffset(),
results.get(0).getRowLength());
delete.deleteColumns(deleteCF, deleteCQ, ts);
                                // force tephra to ignore these deletes
delete.setAttribute(TxConstants.TX_ROLLBACK_ATTRIBUTE_KEY, new byte[0]);
mutations.add(delete);
}
}
if (emptyCF != null) {
/*
* If we've specified an emptyCF, then we need to insert an empty
* key value "retroactively" for any key value that is visible at
* the timestamp that the DDL was issued. Key values that are not
* visible at this timestamp will not ever be projected up to
* scans past this timestamp, so don't need to be considered.
* We insert one empty key value per row per timestamp.
*/
Set<Long> timeStamps =
Sets.newHashSetWithExpectedSize(results.size());
for (Cell kv : results) {
long kvts = kv.getTimestamp();
if (!timeStamps.contains(kvts)) {
Put put = new Put(kv.getRowArray(), kv.getRowOffset(),
kv.getRowLength());
put.add(emptyCF, QueryConstants.EMPTY_COLUMN_BYTES, kvts,
ByteUtil.EMPTY_BYTE_ARRAY);
mutations.add(put);
}
}
}
// Commit in batches based on UPSERT_BATCH_SIZE_ATTRIB in config
if (!mutations.isEmpty() && batchSize > 0 &&
mutations.size() % batchSize == 0) {
commitBatch(region, mutations, indexUUID, blockingMemStoreSize);
mutations.clear();
}
// Commit in batches based on UPSERT_BATCH_SIZE_ATTRIB in config
if (!indexMutations.isEmpty() && batchSize > 0 &&
indexMutations.size() % batchSize == 0) {
commitBatch(region, indexMutations, null, blockingMemStoreSize);
indexMutations.clear();
}
} catch (ConstraintViolationException e) {
// Log and ignore in count
logger.error(LogUtil.addCustomAnnotations("Failed to create row in " +
region.getRegionInfo().getRegionNameAsString() + " with values " +
SchemaUtil.toString(values),
ScanUtil.getCustomAnnotations(scan)), e);
continue;
}
aggregators.aggregate(rowAggregators, result);
hasAny = true;
}
} while (hasMore);
if (!mutations.isEmpty()) {
commitBatch(region,mutations, indexUUID, blockingMemStoreSize);
}
if (!indexMutations.isEmpty()) {
commitBatch(region,indexMutations, null, blockingMemStoreSize);
indexMutations.clear();
}
}
} finally {
if(needToWrite) {
synchronized (lock) {
scansReferenceCount--;
}
}
try {
innerScanner.close();
} finally {
if (acquiredLock) region.closeRegionOperation();
}
}
if (logger.isDebugEnabled()) {
logger.debug(LogUtil.addCustomAnnotations("Finished scanning " + rowCount + " rows for ungrouped coprocessor scan " + scan, ScanUtil.getCustomAnnotations(scan)));
}
final boolean hadAny = hasAny;
KeyValue keyValue = null;
if (hadAny) {
byte[] value = aggregators.toBytes(rowAggregators);
keyValue = KeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length);
}
final KeyValue aggKeyValue = keyValue;
RegionScanner scanner = new BaseRegionScanner(innerScanner) {
private boolean done = !hadAny;
@Override
public boolean isFilterDone() {
return done;
}
@Override
public boolean next(List<Cell> results) throws IOException {
if (done) return false;
done = true;
results.add(aggKeyValue);
return false;
}
@Override
public long getMaxResultSize() {
return scan.getMaxResultSize();
}
};
return scanner;
}
|
#vulnerable code
@Override
protected RegionScanner doPostScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c, final Scan scan, final RegionScanner s) throws IOException {
RegionCoprocessorEnvironment env = c.getEnvironment();
Region region = env.getRegion();
long ts = scan.getTimeRange().getMax();
boolean localIndexScan = ScanUtil.isLocalIndex(scan);
if (ScanUtil.isAnalyzeTable(scan)) {
byte[] gp_width_bytes =
scan.getAttribute(BaseScannerRegionObserver.GUIDEPOST_WIDTH_BYTES);
byte[] gp_per_region_bytes =
scan.getAttribute(BaseScannerRegionObserver.GUIDEPOST_PER_REGION);
// Let this throw, as this scan is being done for the sole purpose of collecting stats
StatisticsCollector statsCollector = StatisticsCollectorFactory.createStatisticsCollector(
env, region.getRegionInfo().getTable().getNameAsString(), ts,
gp_width_bytes, gp_per_region_bytes);
return collectStats(s, statsCollector, region, scan, env.getConfiguration());
}
int offsetToBe = 0;
if (localIndexScan) {
/*
* For local indexes, we need to set an offset on row key expressions to skip
* the region start key.
*/
offsetToBe = region.getRegionInfo().getStartKey().length != 0 ? region.getRegionInfo().getStartKey().length :
region.getRegionInfo().getEndKey().length;
ScanUtil.setRowKeyOffset(scan, offsetToBe);
}
final int offset = offsetToBe;
PTable projectedTable = null;
PTable writeToTable = null;
byte[][] values = null;
byte[] descRowKeyTableBytes = scan.getAttribute(UPGRADE_DESC_ROW_KEY);
boolean isDescRowKeyOrderUpgrade = descRowKeyTableBytes != null;
if (isDescRowKeyOrderUpgrade) {
logger.debug("Upgrading row key for " + region.getRegionInfo().getTable().getNameAsString());
projectedTable = deserializeTable(descRowKeyTableBytes);
try {
writeToTable = PTableImpl.makePTable(projectedTable, true);
} catch (SQLException e) {
ServerUtil.throwIOException("Upgrade failed", e); // Impossible
}
values = new byte[projectedTable.getPKColumns().size()][];
}
byte[] localIndexBytes = scan.getAttribute(LOCAL_INDEX_BUILD);
List<IndexMaintainer> indexMaintainers = localIndexBytes == null ? null : IndexMaintainer.deserialize(localIndexBytes);
List<Mutation> indexMutations = localIndexBytes == null ? Collections.<Mutation>emptyList() : Lists.<Mutation>newArrayListWithExpectedSize(1024);
RegionScanner theScanner = s;
byte[] indexUUID = scan.getAttribute(PhoenixIndexCodec.INDEX_UUID);
List<Expression> selectExpressions = null;
byte[] upsertSelectTable = scan.getAttribute(BaseScannerRegionObserver.UPSERT_SELECT_TABLE);
boolean isUpsert = false;
boolean isDelete = false;
byte[] deleteCQ = null;
byte[] deleteCF = null;
byte[] emptyCF = null;
ImmutableBytesWritable ptr = new ImmutableBytesWritable();
if (upsertSelectTable != null) {
isUpsert = true;
projectedTable = deserializeTable(upsertSelectTable);
selectExpressions = deserializeExpressions(scan.getAttribute(BaseScannerRegionObserver.UPSERT_SELECT_EXPRS));
values = new byte[projectedTable.getPKColumns().size()][];
} else {
byte[] isDeleteAgg = scan.getAttribute(BaseScannerRegionObserver.DELETE_AGG);
isDelete = isDeleteAgg != null && Bytes.compareTo(PDataType.TRUE_BYTES, isDeleteAgg) == 0;
if (!isDelete) {
deleteCF = scan.getAttribute(BaseScannerRegionObserver.DELETE_CF);
deleteCQ = scan.getAttribute(BaseScannerRegionObserver.DELETE_CQ);
}
emptyCF = scan.getAttribute(BaseScannerRegionObserver.EMPTY_CF);
}
TupleProjector tupleProjector = null;
byte[][] viewConstants = null;
ColumnReference[] dataColumns = IndexUtil.deserializeDataTableColumnsToJoin(scan);
final TupleProjector p = TupleProjector.deserializeProjectorFromScan(scan);
final HashJoinInfo j = HashJoinInfo.deserializeHashJoinFromScan(scan);
if ((localIndexScan && !isDelete && !isDescRowKeyOrderUpgrade) || (j == null && p != null)) {
if (dataColumns != null) {
tupleProjector = IndexUtil.getTupleProjector(scan, dataColumns);
viewConstants = IndexUtil.deserializeViewConstantsFromScan(scan);
}
ImmutableBytesWritable tempPtr = new ImmutableBytesWritable();
theScanner =
getWrappedScanner(c, theScanner, offset, scan, dataColumns, tupleProjector,
region, indexMaintainers == null ? null : indexMaintainers.get(0), viewConstants, p, tempPtr);
}
if (j != null) {
theScanner = new HashJoinRegionScanner(theScanner, p, j, ScanUtil.getTenantId(scan), env);
}
int batchSize = 0;
List<Mutation> mutations = Collections.emptyList();
boolean needToWrite = false;
Configuration conf = c.getEnvironment().getConfiguration();
long flushSize = region.getTableDesc().getMemStoreFlushSize();
if (flushSize <= 0) {
flushSize = conf.getLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE,
HTableDescriptor.DEFAULT_MEMSTORE_FLUSH_SIZE);
}
/**
* Upper bound of memstore size allowed for region. Updates will be blocked until the flush
* happen if the memstore reaches this threshold.
*/
final long blockingMemStoreSize = flushSize * (
conf.getLong(HConstants.HREGION_MEMSTORE_BLOCK_MULTIPLIER,
HConstants.DEFAULT_HREGION_MEMSTORE_BLOCK_MULTIPLIER)-1) ;
boolean buildLocalIndex = indexMaintainers != null && dataColumns==null && !localIndexScan;
if (isDescRowKeyOrderUpgrade || isDelete || isUpsert || (deleteCQ != null && deleteCF != null) || emptyCF != null || buildLocalIndex) {
needToWrite = true;
// TODO: size better
mutations = Lists.newArrayListWithExpectedSize(1024);
batchSize = env.getConfiguration().getInt(MUTATE_BATCH_SIZE_ATTRIB, QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE);
}
Aggregators aggregators = ServerAggregators.deserialize(
scan.getAttribute(BaseScannerRegionObserver.AGGREGATORS), env.getConfiguration());
Aggregator[] rowAggregators = aggregators.getAggregators();
boolean hasMore;
boolean hasAny = false;
MultiKeyValueTuple result = new MultiKeyValueTuple();
if (logger.isDebugEnabled()) {
logger.debug(LogUtil.addCustomAnnotations("Starting ungrouped coprocessor scan " + scan + " "+region.getRegionInfo(), ScanUtil.getCustomAnnotations(scan)));
}
long rowCount = 0;
final RegionScanner innerScanner = theScanner;
boolean acquiredLock = false;
try {
if(needToWrite) {
synchronized (lock) {
scansReferenceCount++;
}
}
region.startRegionOperation();
acquiredLock = true;
synchronized (innerScanner) {
do {
List<Cell> results = new ArrayList<Cell>();
// Results are potentially returned even when the return value of s.next is false
// since this is an indication of whether or not there are more values after the
// ones returned
hasMore = innerScanner.nextRaw(results);
if (!results.isEmpty()) {
rowCount++;
result.setKeyValues(results);
try {
if (isDescRowKeyOrderUpgrade) {
Arrays.fill(values, null);
Cell firstKV = results.get(0);
RowKeySchema schema = projectedTable.getRowKeySchema();
int maxOffset = schema.iterator(firstKV.getRowArray(), firstKV.getRowOffset() + offset, firstKV.getRowLength(), ptr);
for (int i = 0; i < schema.getFieldCount(); i++) {
Boolean hasValue = schema.next(ptr, i, maxOffset);
if (hasValue == null) {
break;
}
Field field = schema.getField(i);
if (field.getSortOrder() == SortOrder.DESC) {
// Special case for re-writing DESC ARRAY, as the actual byte value needs to change in this case
if (field.getDataType().isArrayType()) {
field.getDataType().coerceBytes(ptr, null, field.getDataType(),
field.getMaxLength(), field.getScale(), field.getSortOrder(),
field.getMaxLength(), field.getScale(), field.getSortOrder(), true); // force to use correct separator byte
}
// Special case for re-writing DESC CHAR or DESC BINARY, to force the re-writing of trailing space characters
else if (field.getDataType() == PChar.INSTANCE || field.getDataType() == PBinary.INSTANCE) {
int len = ptr.getLength();
while (len > 0 && ptr.get()[ptr.getOffset() + len - 1] == StringUtil.SPACE_UTF8) {
len--;
}
ptr.set(ptr.get(), ptr.getOffset(), len);
// Special case for re-writing DESC FLOAT and DOUBLE, as they're not inverted like they should be (PHOENIX-2171)
} else if (field.getDataType() == PFloat.INSTANCE || field.getDataType() == PDouble.INSTANCE) {
byte[] invertedBytes = SortOrder.invert(ptr.get(), ptr.getOffset(), ptr.getLength());
ptr.set(invertedBytes);
}
} else if (field.getDataType() == PBinary.INSTANCE) {
// Remove trailing space characters so that the setValues call below will replace them
// with the correct zero byte character. Note this is somewhat dangerous as these
// could be legit, but I don't know what the alternative is.
int len = ptr.getLength();
while (len > 0 && ptr.get()[ptr.getOffset() + len - 1] == StringUtil.SPACE_UTF8) {
len--;
}
ptr.set(ptr.get(), ptr.getOffset(), len);
}
values[i] = ptr.copyBytes();
}
writeToTable.newKey(ptr, values);
if (Bytes.compareTo(
firstKV.getRowArray(), firstKV.getRowOffset() + offset, firstKV.getRowLength(),
ptr.get(),ptr.getOffset() + offset,ptr.getLength()) == 0) {
continue;
}
byte[] newRow = ByteUtil.copyKeyBytesIfNecessary(ptr);
if (offset > 0) { // for local indexes (prepend region start key)
byte[] newRowWithOffset = new byte[offset + newRow.length];
System.arraycopy(firstKV.getRowArray(), firstKV.getRowOffset(), newRowWithOffset, 0, offset);;
System.arraycopy(newRow, 0, newRowWithOffset, offset, newRow.length);
newRow = newRowWithOffset;
}
byte[] oldRow = Bytes.copy(firstKV.getRowArray(), firstKV.getRowOffset(), firstKV.getRowLength());
for (Cell cell : results) {
// Copy existing cell but with new row key
Cell newCell = new KeyValue(newRow, 0, newRow.length,
cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength(),
cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength(),
cell.getTimestamp(), KeyValue.Type.codeToType(cell.getTypeByte()),
cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
switch (KeyValue.Type.codeToType(cell.getTypeByte())) {
case Put:
// If Put, point delete old Put
Delete del = new Delete(oldRow);
del.addDeleteMarker(new KeyValue(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(),
cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength(),
cell.getQualifierArray(), cell.getQualifierOffset(),
cell.getQualifierLength(), cell.getTimestamp(), KeyValue.Type.Delete,
ByteUtil.EMPTY_BYTE_ARRAY, 0, 0));
mutations.add(del);
Put put = new Put(newRow);
put.add(newCell);
mutations.add(put);
break;
case Delete:
case DeleteColumn:
case DeleteFamily:
case DeleteFamilyVersion:
Delete delete = new Delete(newRow);
delete.addDeleteMarker(newCell);
mutations.add(delete);
break;
}
}
} else if (buildLocalIndex) {
for (IndexMaintainer maintainer : indexMaintainers) {
if (!results.isEmpty()) {
result.getKey(ptr);
ValueGetter valueGetter =
maintainer.createGetterFromKeyValues(
ImmutableBytesPtr.copyBytesIfNecessary(ptr),
results);
Put put = maintainer.buildUpdateMutation(kvBuilder,
valueGetter, ptr, results.get(0).getTimestamp(),
env.getRegion().getRegionInfo().getStartKey(),
env.getRegion().getRegionInfo().getEndKey());
indexMutations.add(put);
}
}
result.setKeyValues(results);
} else if (isDelete) {
// FIXME: the version of the Delete constructor without the lock
// args was introduced in 0.94.4, thus if we try to use it here
// we can no longer use the 0.94.2 version of the client.
Cell firstKV = results.get(0);
Delete delete = new Delete(firstKV.getRowArray(),
firstKV.getRowOffset(), firstKV.getRowLength(),ts);
mutations.add(delete);
                            // force tephra to ignore these deletes
delete.setAttribute(TxConstants.TX_ROLLBACK_ATTRIBUTE_KEY, new byte[0]);
} else if (isUpsert) {
Arrays.fill(values, null);
int i = 0;
List<PColumn> projectedColumns = projectedTable.getColumns();
for (; i < projectedTable.getPKColumns().size(); i++) {
Expression expression = selectExpressions.get(i);
if (expression.evaluate(result, ptr)) {
values[i] = ptr.copyBytes();
// If SortOrder from expression in SELECT doesn't match the
// column being projected into then invert the bits.
if (expression.getSortOrder() !=
projectedColumns.get(i).getSortOrder()) {
SortOrder.invert(values[i], 0, values[i], 0,
values[i].length);
}
}
}
projectedTable.newKey(ptr, values);
PRow row = projectedTable.newRow(kvBuilder, ts, ptr);
for (; i < projectedColumns.size(); i++) {
Expression expression = selectExpressions.get(i);
if (expression.evaluate(result, ptr)) {
PColumn column = projectedColumns.get(i);
Object value = expression.getDataType()
.toObject(ptr, column.getSortOrder());
// We are guaranteed that the two column will have the
// same type.
if (!column.getDataType().isSizeCompatible(ptr, value,
column.getDataType(), expression.getMaxLength(),
expression.getScale(), column.getMaxLength(),
column.getScale())) {
throw new DataExceedsCapacityException(
column.getDataType(), column.getMaxLength(),
column.getScale());
}
column.getDataType().coerceBytes(ptr, value,
expression.getDataType(), expression.getMaxLength(),
expression.getScale(), expression.getSortOrder(),
column.getMaxLength(), column.getScale(),
column.getSortOrder(), projectedTable.rowKeyOrderOptimizable());
byte[] bytes = ByteUtil.copyKeyBytesIfNecessary(ptr);
row.setValue(column, bytes);
}
}
for (Mutation mutation : row.toRowMutations()) {
mutations.add(mutation);
}
for (i = 0; i < selectExpressions.size(); i++) {
selectExpressions.get(i).reset();
}
} else if (deleteCF != null && deleteCQ != null) {
// No need to search for delete column, since we project only it
// if no empty key value is being set
if (emptyCF == null ||
result.getValue(deleteCF, deleteCQ) != null) {
Delete delete = new Delete(results.get(0).getRowArray(),
results.get(0).getRowOffset(),
results.get(0).getRowLength());
delete.deleteColumns(deleteCF, deleteCQ, ts);
                                // force tephra to ignore these deletes
delete.setAttribute(TxConstants.TX_ROLLBACK_ATTRIBUTE_KEY, new byte[0]);
mutations.add(delete);
}
}
if (emptyCF != null) {
/*
* If we've specified an emptyCF, then we need to insert an empty
* key value "retroactively" for any key value that is visible at
* the timestamp that the DDL was issued. Key values that are not
* visible at this timestamp will not ever be projected up to
* scans past this timestamp, so don't need to be considered.
* We insert one empty key value per row per timestamp.
*/
Set<Long> timeStamps =
Sets.newHashSetWithExpectedSize(results.size());
for (Cell kv : results) {
long kvts = kv.getTimestamp();
if (!timeStamps.contains(kvts)) {
Put put = new Put(kv.getRowArray(), kv.getRowOffset(),
kv.getRowLength());
put.add(emptyCF, QueryConstants.EMPTY_COLUMN_BYTES, kvts,
ByteUtil.EMPTY_BYTE_ARRAY);
mutations.add(put);
}
}
}
// Commit in batches based on UPSERT_BATCH_SIZE_ATTRIB in config
if (!mutations.isEmpty() && batchSize > 0 &&
mutations.size() % batchSize == 0) {
commitBatch(region, mutations, indexUUID, blockingMemStoreSize);
mutations.clear();
}
// Commit in batches based on UPSERT_BATCH_SIZE_ATTRIB in config
if (!indexMutations.isEmpty() && batchSize > 0 &&
indexMutations.size() % batchSize == 0) {
commitBatch(region, indexMutations, null, blockingMemStoreSize);
indexMutations.clear();
}
} catch (ConstraintViolationException e) {
// Log and ignore in count
logger.error(LogUtil.addCustomAnnotations("Failed to create row in " +
region.getRegionInfo().getRegionNameAsString() + " with values " +
SchemaUtil.toString(values),
ScanUtil.getCustomAnnotations(scan)), e);
continue;
}
aggregators.aggregate(rowAggregators, result);
hasAny = true;
}
} while (hasMore);
if (!mutations.isEmpty()) {
commitBatch(region,mutations, indexUUID, blockingMemStoreSize);
}
if (!indexMutations.isEmpty()) {
commitBatch(region,indexMutations, null, blockingMemStoreSize);
indexMutations.clear();
}
}
} finally {
if(needToWrite) {
synchronized (lock) {
scansReferenceCount--;
}
}
try {
innerScanner.close();
} finally {
if (acquiredLock) region.closeRegionOperation();
}
}
if (logger.isDebugEnabled()) {
logger.debug(LogUtil.addCustomAnnotations("Finished scanning " + rowCount + " rows for ungrouped coprocessor scan " + scan, ScanUtil.getCustomAnnotations(scan)));
}
final boolean hadAny = hasAny;
KeyValue keyValue = null;
if (hadAny) {
byte[] value = aggregators.toBytes(rowAggregators);
keyValue = KeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length);
}
final KeyValue aggKeyValue = keyValue;
RegionScanner scanner = new BaseRegionScanner(innerScanner) {
private boolean done = !hadAny;
@Override
public boolean isFilterDone() {
return done;
}
@Override
public boolean next(List<Cell> results) throws IOException {
if (done) return false;
done = true;
results.add(aggKeyValue);
return false;
}
@Override
public long getMaxResultSize() {
return scan.getMaxResultSize();
}
};
return scanner;
}
#location 404
#vulnerability type THREAD_SAFETY_VIOLATION
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
/**
 * Backward-compatibility test (patched variant): data upserted and deleted
 * through the older, compatible client version must be observable by the
 * current client. Step order is significant — each query step reads the
 * state produced by the preceding client invocation.
 */
@Test
public void testUpsertDeleteWithOldClient() throws Exception {
    // Insert data with old client and read with new client
    executeQueryWithClientVersion(compatibleClientVersion, CREATE_ADD);
    executeQueriesWithCurrentVersion(QUERY);
    // Verify output for the create/add scenario matches expectations.
    assertExpectedOutput(CREATE_ADD, QUERY);
    // Deletes with the old client
    executeQueryWithClientVersion(compatibleClientVersion, ADD_DELETE);
    executeQueryWithClientVersion(compatibleClientVersion, QUERY_ADD_DELETE);
    assertExpectedOutput(ADD_DELETE, QUERY_ADD_DELETE);
}
|
#vulnerable code
// Vulnerable variant, kept verbatim as dataset content. Flagged as
// THREAD_SAFETY_VIOLATION at the checkForPreConditions() call (location 3,
// per the label below); the patched variant above drops that call and uses
// assertExpectedOutput(...) in place of assertTrue(compareOutput(...)).
@Test
public void testUpsertDeleteWithOldClient() throws Exception {
    checkForPreConditions();
    // Insert data with old client and read with new client
    executeQueryWithClientVersion(compatibleClientVersion, CREATE_ADD);
    executeQueriesWithCurrentVersion(QUERY);
    assertTrue(compareOutput(CREATE_ADD, QUERY));
    // Deletes with the old client
    executeQueryWithClientVersion(compatibleClientVersion, ADD_DELETE);
    executeQueryWithClientVersion(compatibleClientVersion, QUERY_ADD_DELETE);
    assertTrue(compareOutput(ADD_DELETE, QUERY_ADD_DELETE));
}
#location 3
#vulnerability type THREAD_SAFETY_VIOLATION
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
/**
 * Patched variant: delegates to a parameterized overload with a hard-coded
 * {@code false} argument (what the flag toggles is not visible here —
 * TODO confirm against the overload's signature).
 */
@Test
public void testDeleteRowFromTableWithImmutableIndex() throws SQLException {
    testDeleteRowFromTableWithImmutableIndex(false);
}
|
#vulnerable code
// Vulnerable variant, kept verbatim as dataset content. Flagged as
// NULL_DEREFERENCE (location 58, per the label below): if
// DriverManager.getConnection() throws, `con` is still null when the
// finally block calls con.close().
@Test
public void testDeleteRowFromTableWithImmutableIndex() throws SQLException {
    Connection con = null;
    try {
        boolean autoCommit = false;
        con = DriverManager.getConnection(getUrl());
        con.setAutoCommit(autoCommit);
        Statement stm = con.createStatement();
        // Immutable-rows table with a 4-column composite PK and an index
        // on (DATE, FEATURE).
        stm.execute("CREATE TABLE IF NOT EXISTS web_stats (" +
                "HOST CHAR(2) NOT NULL," +
                "DOMAIN VARCHAR NOT NULL, " +
                "FEATURE VARCHAR NOT NULL, " +
                "DATE DATE NOT NULL, \n" +
                "USAGE.CORE BIGINT," +
                "USAGE.DB BIGINT," +
                "STATS.ACTIVE_VISITOR INTEGER " +
                "CONSTRAINT PK PRIMARY KEY (HOST, DOMAIN, FEATURE, DATE)) IMMUTABLE_ROWS=true");
        stm.execute("CREATE INDEX web_stats_idx ON web_stats (DATE, FEATURE)");
        stm.close();
        Date date = new Date(0);
        PreparedStatement psInsert = con
                .prepareStatement("UPSERT INTO web_stats(HOST, DOMAIN, FEATURE, DATE, CORE, DB, ACTIVE_VISITOR) VALUES(?,?, ? , ?, ?, ?, ?)");
        psInsert.setString(1, "AA");
        psInsert.setString(2, "BB");
        psInsert.setString(3, "CC");
        psInsert.setDate(4, date);
        psInsert.setLong(5, 1L);
        psInsert.setLong(6, 2L);
        psInsert.setLong(7, 3);
        psInsert.execute();
        psInsert.close();
        if (!autoCommit) {
            con.commit();
        }
        // Delete the row just inserted, keyed by the full primary key.
        psInsert = con.prepareStatement("DELETE FROM web_stats WHERE (HOST, DOMAIN, FEATURE, DATE) = (?,?,?,?)");
        psInsert.setString(1, "AA");
        psInsert.setString(2, "BB");
        psInsert.setString(3, "CC");
        psInsert.setDate(4, date);
        psInsert.execute();
        if (!autoCommit) {
            con.commit();
        }
        // Both the data table (index use disabled via hint) and the index
        // table must now be empty.
        ResultSet rs = con.createStatement().executeQuery("SELECT /*+ NO_INDEX */ count(*) FROM web_stats");
        assertTrue(rs.next());
        assertEquals(0, rs.getLong(1));
        rs = con.createStatement().executeQuery("SELECT count(*) FROM web_stats_idx");
        assertTrue(rs.next());
        assertEquals(0, rs.getLong(1));
    } finally {
        try {
            // NOTE(review): `con` may still be null here if getConnection()
            // threw above — this is the flagged NULL_DEREFERENCE.
            con.close();
        } catch (Exception ex) {
            // intentionally swallowed: best-effort cleanup
        }
    }
}
#location 58
#vulnerability type NULL_DEREFERENCE
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
/**
 * Patched variant: delivers one span through the tracer, waits (via the
 * latch counted down by the trace writer) for it to be committed to the
 * tracing table, then reads it back with {@code TraceReader} and checks
 * every recorded field.
 */
@Test
public void writeMetrics() throws Exception {
    Connection conn = getConnectionWithoutTracing();
    latch = new CountDownLatch(1);
    // Start the shared writer so the delivered span actually gets flushed.
    testTraceWriter.start();
    // create a simple metrics record
    long traceid = 987654;
    String description = "Some generic trace";
    long spanid = 10;
    long parentid = 11;
    long startTime = 12;
    long endTime = 13;
    String processid = "Some process";
    String annotation = "test annotation for a span";
    Span span = createNewSpan(traceid, parentid, spanid, description, startTime, endTime,
        processid, annotation);
    // Deliver through the tracer rather than poking an internal queue.
    Tracer.getInstance().deliver(span);
    assertTrue("Span never committed to table", latch.await(30, TimeUnit.SECONDS));
    // make sure we only get expected stat entry (matcing the trace id), otherwise we could the
    // stats for the update as well
    TraceReader reader = new TraceReader(conn, tracingTableName);
    Collection<TraceHolder> traces = reader.readAll(10);
    assertEquals("Wrong number of traces in the tracing table", 1, traces.size());
    // validate trace
    TraceHolder trace = traces.iterator().next();
    // we are just going to get an orphan span b/c we don't send in a parent
    assertEquals("Didn't get expected orphaned spans!" + trace.orphans, 1, trace.orphans.size());
    assertEquals(traceid, trace.traceid);
    SpanInfo spanInfo = trace.orphans.get(0);
    assertEquals(description, spanInfo.description);
    assertEquals(parentid, spanInfo.getParentIdForTesting());
    assertEquals(startTime, spanInfo.start);
    assertEquals(endTime, spanInfo.end);
    assertEquals("Wrong number of tags", 0, spanInfo.tagCount);
    assertEquals("Wrong number of annotations", 1, spanInfo.annotationCount);
}
|
#vulnerable code
// Vulnerable variant, kept verbatim as dataset content. Flagged as
// RESOURCE_LEAK at the TestTraceWriter construction (location 23, per the
// label below): the writer is created here but never started or closed, and
// the span is pushed directly into a locally-created TraceSpanReceiver's
// queue instead of being delivered through the tracer (compare the patched
// variant above).
@Test
public void writeMetrics() throws Exception {
    Connection conn = getConnectionWithoutTracing();
    String tableName = generateUniqueName();
    TraceSpanReceiver traceSpanReceiver = new TraceSpanReceiver();
    latch = new CountDownLatch(1);
    testTraceWriter = new TestTraceWriter(tableName, defaultTracingThreadPoolForTest, defaultTracingBatchSizeForTest);
    // create a simple metrics record
    long traceid = 987654;
    String description = "Some generic trace";
    long spanid = 10;
    long parentid = 11;
    long startTime = 12;
    long endTime = 13;
    String processid = "Some process";
    String annotation = "test annotation for a span";
    Span span = createNewSpan(traceid, parentid, spanid, description, startTime, endTime,
        processid, annotation);
    traceSpanReceiver.getSpanQueue().add(span);
    assertTrue("Span never committed to table", latch.await(30, TimeUnit.SECONDS));
    // make sure we only get expected stat entry (matcing the trace id), otherwise we could the
    // stats for the update as well
    TraceReader reader = new TraceReader(conn, tableName);
    Collection<TraceHolder> traces = reader.readAll(10);
    assertEquals("Wrong number of traces in the tracing table", 1, traces.size());
    // validate trace
    TraceHolder trace = traces.iterator().next();
    // we are just going to get an orphan span b/c we don't send in a parent
    assertEquals("Didn't get expected orphaned spans!" + trace.orphans, 1, trace.orphans.size());
    assertEquals(traceid, trace.traceid);
    SpanInfo spanInfo = trace.orphans.get(0);
    assertEquals(description, spanInfo.description);
    assertEquals(parentid, spanInfo.getParentIdForTesting());
    assertEquals(startTime, spanInfo.start);
    assertEquals(endTime, spanInfo.end);
    assertEquals("Wrong number of tags", 0, spanInfo.tagCount);
    assertEquals("Wrong number of annotations", 1, spanInfo.annotationCount);
}
#location 23
#vulnerability type RESOURCE_LEAK
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
/**
 * Patched variant: scans one page of data-table rows under the region read
 * lock and either rebuilds or verifies the corresponding index rows, then
 * emits a single aggregate cell carrying the processed row count.
 *
 * @param results receives the aggregate key value for this page
 * @return true while more rows remain to be scanned
 */
@Override
public boolean next(List<Cell> results) throws IOException {
    // Single-row rebuild requested but the data row is gone: report the
    // rebuild return code as the row count and finish immediately.
    if (indexRowKey != null &&
            singleRowRebuildReturnCode == GlobalIndexChecker.RebuildReturnCode.NO_DATA_ROW.getValue()) {
        byte[] rowCountBytes =
                PLong.INSTANCE.toBytes(Long.valueOf(singleRowRebuildReturnCode));
        final Cell aggKeyValue = PhoenixKeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY,
                SINGLE_COLUMN, AGG_TIMESTAMP, rowCountBytes, 0, rowCountBytes.length);
        results.add(aggKeyValue);
        return false;
    }
    Cell lastCell = null;
    int rowCount = 0;
    region.startRegionOperation();
    RegionScanner localScanner = null;
    try {
        byte[] uuidValue = ServerCacheClient.generateId();
        localScanner = getLocalScanner();
        if (localScanner == null) {
            return false;
        }
        synchronized (localScanner) {
            if (!shouldVerify()) {
                skipped = true;
                return false;
            }
            do {
                /**
                 If region is closing and there are large number of rows being verified/rebuilt with IndexTool,
                 not having this check will impact/delay the region closing -- affecting the availability
                 as this method holds the read lock on the region.
                 * */
                ungroupedAggregateRegionObserver.checkForRegionClosingOrSplitting();
                List<Cell> row = new ArrayList<Cell>();
                hasMore = localScanner.nextRaw(row);
                if (!row.isEmpty()) {
                    lastCell = row.get(0); // lastCell is any cell from the last visited row
                    Put put = null;
                    Delete del = null;
                    // Partition the raw cells of this row: live cells into a
                    // Put, delete markers into a Delete.
                    for (Cell cell : row) {
                        if (KeyValue.Type.codeToType(cell.getTypeByte()) == KeyValue.Type.Put) {
                            if (!partialRebuild && familyMap != null && !isColumnIncluded(cell)) {
                                continue;
                            }
                            if (put == null) {
                                put = new Put(CellUtil.cloneRow(cell));
                            }
                            put.add(cell);
                        } else {
                            if (del == null) {
                                del = new Delete(CellUtil.cloneRow(cell));
                            }
                            del.addDeleteMarker(cell);
                        }
                    }
                    if (put == null && del == null) {
                        continue;
                    }
                    // Always add the put first and then delete for a given row. This simplifies the logic in
                    // IndexRegionObserver
                    if (put != null) {
                        mutations.add(put);
                    }
                    if (del != null) {
                        mutations.add(del);
                    }
                    if (!verify) {
                        // Rebuild path: tag mutations and commit in batches.
                        if (put != null) {
                            setMutationAttributes(put, uuidValue);
                        }
                        if (del != null) {
                            setMutationAttributes(del, uuidValue);
                        }
                        uuidValue = commitIfReady(uuidValue, mutations);
                    } else {
                        // Verify path: record expected index mutations per data key.
                        byte[] dataKey = (put != null) ? put.getRow() : del.getRow();
                        prepareIndexMutations(put, del);
                        dataKeyToMutationMap.put(dataKey, new Pair<Put, Delete>(put, del));
                    }
                    rowCount++;
                }
            } while (hasMore && rowCount < pageSizeInRows);
            if (!mutations.isEmpty()) {
                if (verify) {
                    verifyAndOrRebuildIndex();
                } else {
                    ungroupedAggregateRegionObserver.checkForRegionClosingOrSplitting();
                    ungroupedAggregateRegionObserver.commitBatchWithRetries(region, mutations, blockingMemstoreSize);
                }
            }
        }
    } catch (Throwable e) {
        LOGGER.error("Exception in IndexRebuildRegionScanner for region "
                + region.getRegionInfo().getRegionNameAsString(), e);
        throw e;
    } finally {
        region.closeRegionOperation();
        mutations.clear();
        if (verify) {
            dataKeyToMutationMap.clear();
            indexKeyToMutationMap.clear();
        }
        // Close the local scanner only if it is distinct from the wrapped one.
        if (localScanner!=null && localScanner!=innerScanner) {
            localScanner.close();
        }
    }
    if (indexRowKey != null) {
        rowCount = singleRowRebuildReturnCode;
    }
    if (minTimestamp != 0) {
        nextStartKey = ByteUtil.calculateTheClosestNextRowKeyForPrefix(CellUtil.cloneRow(lastCell));
    }
    // Emit the page's row count as a single aggregate cell, keyed by the
    // last visited row when one exists.
    byte[] rowCountBytes = PLong.INSTANCE.toBytes(Long.valueOf(rowCount));
    final Cell aggKeyValue;
    if (lastCell == null) {
        aggKeyValue = PhoenixKeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY,
                SINGLE_COLUMN, AGG_TIMESTAMP, rowCountBytes, 0, rowCountBytes.length);
    } else {
        aggKeyValue = PhoenixKeyValueUtil.newKeyValue(CellUtil.cloneRow(lastCell), SINGLE_COLUMN_FAMILY,
                SINGLE_COLUMN, AGG_TIMESTAMP, rowCountBytes, 0, rowCountBytes.length);
    }
    results.add(aggKeyValue);
    return hasMore || hasMoreIncr;
}
|
#vulnerable code
// Vulnerable variant, kept verbatim as dataset content. Flagged as
// THREAD_SAFETY_VIOLATION (location 94, per the label below): unlike the
// patched variant above, the scan loop holds the region read lock without
// calling checkForRegionClosingOrSplitting() per iteration, which can delay
// region closing during large verify/rebuild runs.
@Override
public boolean next(List<Cell> results) throws IOException {
    if (indexRowKey != null &&
            singleRowRebuildReturnCode == GlobalIndexChecker.RebuildReturnCode.NO_DATA_ROW.getValue()) {
        byte[] rowCountBytes =
                PLong.INSTANCE.toBytes(Long.valueOf(singleRowRebuildReturnCode));
        final Cell aggKeyValue = PhoenixKeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY,
                SINGLE_COLUMN, AGG_TIMESTAMP, rowCountBytes, 0, rowCountBytes.length);
        results.add(aggKeyValue);
        return false;
    }
    Cell lastCell = null;
    int rowCount = 0;
    region.startRegionOperation();
    RegionScanner localScanner = null;
    try {
        byte[] uuidValue = ServerCacheClient.generateId();
        localScanner = getLocalScanner();
        if (localScanner == null) {
            return false;
        }
        synchronized (localScanner) {
            if (!shouldVerify()) {
                skipped = true;
                return false;
            }
            do {
                // NOTE(review): no checkForRegionClosingOrSplitting() here —
                // this is the flagged thread-safety issue.
                List<Cell> row = new ArrayList<Cell>();
                hasMore = localScanner.nextRaw(row);
                if (!row.isEmpty()) {
                    lastCell = row.get(0); // lastCell is any cell from the last visited row
                    Put put = null;
                    Delete del = null;
                    for (Cell cell : row) {
                        if (KeyValue.Type.codeToType(cell.getTypeByte()) == KeyValue.Type.Put) {
                            if (!partialRebuild && familyMap != null && !isColumnIncluded(cell)) {
                                continue;
                            }
                            if (put == null) {
                                put = new Put(CellUtil.cloneRow(cell));
                            }
                            put.add(cell);
                        } else {
                            if (del == null) {
                                del = new Delete(CellUtil.cloneRow(cell));
                            }
                            del.addDeleteMarker(cell);
                        }
                    }
                    if (put == null && del == null) {
                        continue;
                    }
                    // Always add the put first and then delete for a given row. This simplifies the logic in
                    // IndexRegionObserver
                    if (put != null) {
                        mutations.add(put);
                    }
                    if (del != null) {
                        mutations.add(del);
                    }
                    if (!verify) {
                        if (put != null) {
                            setMutationAttributes(put, uuidValue);
                        }
                        if (del != null) {
                            setMutationAttributes(del, uuidValue);
                        }
                        uuidValue = commitIfReady(uuidValue, mutations);
                    } else {
                        byte[] dataKey = (put != null) ? put.getRow() : del.getRow();
                        prepareIndexMutations(put, del);
                        dataKeyToMutationMap.put(dataKey, new Pair<Put, Delete>(put, del));
                    }
                    rowCount++;
                }
            } while (hasMore && rowCount < pageSizeInRows);
            if (!mutations.isEmpty()) {
                if (verify) {
                    verifyAndOrRebuildIndex();
                } else {
                    ungroupedAggregateRegionObserver.checkForRegionClosingOrSplitting();
                    ungroupedAggregateRegionObserver.commitBatchWithRetries(region, mutations, blockingMemstoreSize);
                }
            }
        }
    } catch (Throwable e) {
        LOGGER.error("Exception in IndexRebuildRegionScanner for region "
                + region.getRegionInfo().getRegionNameAsString(), e);
        throw e;
    } finally {
        region.closeRegionOperation();
        mutations.clear();
        if (verify) {
            dataKeyToMutationMap.clear();
            indexKeyToMutationMap.clear();
        }
        if (localScanner!=null && localScanner!=innerScanner) {
            localScanner.close();
        }
    }
    if (indexRowKey != null) {
        rowCount = singleRowRebuildReturnCode;
    }
    if (minTimestamp != 0) {
        nextStartKey = ByteUtil.calculateTheClosestNextRowKeyForPrefix(CellUtil.cloneRow(lastCell));
    }
    byte[] rowCountBytes = PLong.INSTANCE.toBytes(Long.valueOf(rowCount));
    final Cell aggKeyValue;
    if (lastCell == null) {
        aggKeyValue = PhoenixKeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY,
                SINGLE_COLUMN, AGG_TIMESTAMP, rowCountBytes, 0, rowCountBytes.length);
    } else {
        aggKeyValue = PhoenixKeyValueUtil.newKeyValue(CellUtil.cloneRow(lastCell), SINGLE_COLUMN_FAMILY,
                SINGLE_COLUMN, AGG_TIMESTAMP, rowCountBytes, 0, rowCountBytes.length);
    }
    results.add(aggKeyValue);
    return hasMore || hasMoreIncr;
}
#location 94
#vulnerability type THREAD_SAFETY_VIOLATION
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#fixed code
/**
 * Scales a server timestamp by the transaction provider's
 * max-transactions-per-second factor.
 *
 * @param serverTimeStamp server-side timestamp to convert
 * @return the scaled timestamp
 */
public static long convertToNanoseconds(long serverTimeStamp) {
    final long txnsPerSecond = TransactionFactory.getTransactionProvider()
            .getTransactionContext()
            .getMaxTransactionsPerSecond();
    return serverTimeStamp * txnsPerSecond;
}
|
#vulnerable code
// Vulnerable variant, kept verbatim as dataset content. Flagged as
// NULL_DEREFERENCE (location 2, per the label below); the patched variant
// calls getTransactionProvider() instead of getTransactionFactory().
public static long convertToNanoseconds(long serverTimeStamp) {
    return serverTimeStamp * TransactionFactory.getTransactionFactory().getTransactionContext().getMaxTransactionsPerSecond();
}
#location 2
#vulnerability type NULL_DEREFERENCE
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.