#fixed code @Test public void findWhere() { class Book { public final String title; public final String author; public final Integer year; public Book(final String title, final String author, final Integer year) { this.title = title; this.author = author; this.year = year; } public String toString() { return "title: " + title + ", author: " + author + ", year: " + year; } }; List<Book> listOfPlays = new ArrayList<Book>() {{ add(new Book("Cymbeline2", "Shakespeare", 1614)); add(new Book("Cymbeline", "Shakespeare", 1611)); add(new Book("The Tempest", "Shakespeare", 1611)); }}; assertEquals("title: Cymbeline, author: Shakespeare, year: 1611", _.findWhere(listOfPlays, asList( Tuple.<String, Object>create("author", "Shakespeare"), Tuple.<String, Object>create("year", Integer.valueOf(1611)))).get().toString()); assertEquals("title: Cymbeline, author: Shakespeare, year: 1611", _.findWhere(listOfPlays, asList( Tuple.<String, Object>create("author", "Shakespeare"), Tuple.<String, Object>create("author2", "Shakespeare"), Tuple.<String, Object>create("year", Integer.valueOf(1611)))).get().toString()); }
#vulnerable code @Test public void findWhere() { class Book { public final String title; public final String author; public final Integer year; public Book(final String title, final String author, final Integer year) { this.title = title; this.author = author; this.year = year; } public String toString() { return "title: " + title + ", author: " + author + ", year: " + year; } }; List<Book> listOfPlays = new ArrayList<Book>() {{ add(new Book("Cymbeline2", "Shakespeare", 1614)); add(new Book("Cymbeline", "Shakespeare", 1611)); add(new Book("The Tempest", "Shakespeare", 1611)); }}; assertEquals("title: Cymbeline, author: Shakespeare, year: 1611", _.findWhere(listOfPlays, asList( Tuple.<String, Object>create("author", "Shakespeare"), Tuple.<String, Object>create("year", Integer.valueOf(1611)))).toString()); assertEquals("title: Cymbeline, author: Shakespeare, year: 1611", _.findWhere(listOfPlays, asList( Tuple.<String, Object>create("author", "Shakespeare"), Tuple.<String, Object>create("author2", "Shakespeare"), Tuple.<String, Object>create("year", Integer.valueOf(1611)))).toString()); } #location 25 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
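The patch above works because findWhere now returns an Optional instead of a possibly-null element, so the test unwraps it with get() rather than calling toString() on a value that may be absent. A minimal sketch of that pattern, assuming a hypothetical findWhere over maps (the real method takes a list of Tuple pairs; the names here are illustrative only):

import java.util.*;

public class FindWhereSketch {
    // Returns the first map containing every requested key/value pair,
    // wrapped in an Optional so callers cannot dereference a missing match.
    static Optional<Map<String, Object>> findWhere(
            List<Map<String, Object>> items, Map<String, Object> properties) {
        for (Map<String, Object> item : items) {
            if (item.entrySet().containsAll(properties.entrySet())) {
                return Optional.of(item);
            }
        }
        return Optional.empty(); // no match: an empty Optional, never null
    }

    public static void main(String[] args) {
        Map<String, Object> play = new HashMap<>();
        play.put("author", "Shakespeare");
        play.put("year", 1611);
        Optional<Map<String, Object>> match = findWhere(
                Collections.singletonList(play),
                Collections.singletonMap("year", 1611));
        // get() on an empty Optional fails loudly with NoSuchElementException,
        // instead of the silent NullPointerException the old toString() risked.
        System.out.println(match.get());
    }
}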
#fixed code @Test public void singleOrNull() { U<Integer> uWithMoreElement = new U<>(asList(1, 2, 3)); U<Integer> uWithOneElement = new U<>(singletonList(1)); final Integer result1 = U.singleOrNull(asList(1, 2, 3)); assertNull(result1); final int result2 = U.singleOrNull(singletonList(1)); assertEquals(1, result2); final Integer result3 = U.singleOrNull(new ArrayList<>()); assertNull(result3); final Integer result4 = U.singleOrNull(asList(1, 2, 3), item -> item % 2 == 1); assertNull(result4); final int result5 = U.singleOrNull(asList(1, 2, 3), item -> item % 2 == 0); assertEquals(2, result5); final Integer result6 = U.singleOrNull(asList(1, 2, 3), item -> item == 5); assertNull(result6); final Integer result7 = uWithMoreElement.singleOrNull(); assertNull(result7); final Integer result8 = uWithOneElement.singleOrNull(); assertEquals(result8, Integer.valueOf(1)); final Integer result9 = uWithMoreElement.singleOrNull(item -> item % 2 == 0); assertEquals(result9, Integer.valueOf(2)); final Integer result10 = uWithMoreElement.singleOrNull(item -> item % 2 == 1); assertNull(result10); }
#vulnerable code @Test public void singleOrNull() { U<Integer> uWithMoreElement = new U<>(asList(1, 2, 3)); U<Integer> uWithOneElement = new U<>(asList(1)); final Integer result1 = U.singleOrNull(asList(1, 2, 3)); assertNull(result1); final int result2 = U.singleOrNull(asList(1)); assertEquals(1, result2); final Integer result3 = U.singleOrNull(new ArrayList<>()); assertNull(result3); final Integer result4 = U.singleOrNull(asList(1, 2, 3), item -> item % 2 == 1); assertNull(result4); final int result5 = U.singleOrNull(asList(1, 2, 3), item -> item % 2 == 0); assertEquals(2, result5); final Integer result6 = U.singleOrNull(asList(1, 2, 3), item -> item == 5); assertNull(result6); final Integer result7 = uWithMoreElement.singleOrNull(); assertNull(result7); final Integer result8 = uWithOneElement.singleOrNull(); assertEquals(result8, Integer.valueOf(1)); final Integer result9 = uWithMoreElement.singleOrNull(item -> item % 2 == 0); assertEquals(result9, Integer.valueOf(2)); final Integer result10 = uWithMoreElement.singleOrNull(item -> item % 2 == 1); assertNull(result10); } #location 8 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
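The flagged line in the vulnerable version is one of the assignments to a primitive int: singleOrNull can return null, and auto-unboxing null to int throws a NullPointerException at the assignment itself. A minimal sketch of that failure mode, independent of the library under test:

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class UnboxingNpeSketch {
    // Stand-in for any *OrNull helper: null when the list does not
    // contain exactly one element.
    static Integer singleOrNull(List<Integer> list) {
        return list.size() == 1 ? list.get(0) : null;
    }

    public static void main(String[] args) {
        // Safe: exactly one element, so unboxing to int succeeds.
        int ok = singleOrNull(Collections.singletonList(1));
        System.out.println(ok);

        // Unsafe: null is auto-unboxed to int and throws at the assignment.
        try {
            int boom = singleOrNull(Arrays.asList(1, 2, 3));
            System.out.println(boom);
        } catch (NullPointerException expected) {
            System.out.println("unboxing null threw NPE, as the analyzer warned");
        }
    }
}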
#fixed code public static <E, F extends Number> Double average(final Iterable<E> iterable, final Function<E, F> func) { F sum = sum(iterable, func); if (sum == null) { return null; } return sum.doubleValue() / size(iterable); }
#vulnerable code public static <E, F extends Number> Double average(final Iterable<E> iterable, final Function<E, F> func) { F sum = sum(iterable, func); return sum.doubleValue() / size(iterable); } #location 3 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
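The guard matters because sum(...) returns null for an empty iterable, and sum.doubleValue() on that null is exactly the reported dereference. A stripped-down version of the fixed method, with a hypothetical sum helper standing in for the library's:

import java.util.Arrays;
import java.util.Collections;
import java.util.function.Function;

public class AverageSketch {
    // Hypothetical sum: yields null for an empty input, mirroring the
    // behavior the patch has to defend against.
    static <E, F extends Number> Double sum(Iterable<E> iterable, Function<E, F> func) {
        double total = 0;
        boolean any = false;
        for (E e : iterable) {
            total += func.apply(e).doubleValue();
            any = true;
        }
        return any ? total : null;
    }

    static <E, F extends Number> Double average(Iterable<E> iterable, Function<E, F> func) {
        Double sum = sum(iterable, func);
        if (sum == null) {
            return null; // empty input: propagate null instead of dereferencing it
        }
        int size = 0;
        for (E ignored : iterable) {
            size++;
        }
        return sum / size;
    }

    public static void main(String[] args) {
        System.out.println(average(Arrays.asList(1, 2, 3), x -> x));           // 2.0
        System.out.println(average(Collections.<Integer>emptyList(), x -> x)); // null
    }
}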
#fixed code @Test public void firstOrNull() { final Integer result = $.firstOrNull(asList(5, 4, 3, 2, 1)); assertEquals("5", result.toString()); final Integer resultObj = new $<Integer>(asList(5, 4, 3, 2, 1)).firstOrNull(); assertEquals("5", resultObj.toString()); final Integer resultChain = $.chain(asList(5, 4, 3, 2, 1)).firstOrNull().item(); assertEquals("5", resultChain.toString()); assertNull($.firstOrNull(Collections.emptyList())); assertNull(new $<Integer>(Collections.<Integer>emptyList()).firstOrNull()); final int resultPred = $.firstOrNull(asList(5, 4, 3, 2, 1), new Predicate<Integer>() { public Boolean apply(Integer item) { return item % 2 == 0; } }); assertEquals(4, resultPred); final int resultPredChain = $.chain(asList(5, 4, 3, 2, 1)).firstOrNull(new Predicate<Integer>() { public Boolean apply(Integer item) { return item % 2 == 0; } }).item(); assertEquals(4, resultPredChain); assertNull($.firstOrNull(Collections.<Integer>emptyList(), new Predicate<Integer>() { public Boolean apply(Integer item) { return item % 2 == 0; } })); final int resultPredObj = new $<Integer>(asList(5, 4, 3, 2, 1)).firstOrNull(new Predicate<Integer>() { public Boolean apply(Integer item) { return item % 2 == 0; } }); assertEquals(4, resultPredObj); assertNull(new $<Integer>(Collections.<Integer>emptyList()).firstOrNull(new Predicate<Integer>() { public Boolean apply(Integer item) { return item % 2 == 0; } })); }
#vulnerable code @Test public void firstOrNull() { final Integer result = $.firstOrNull(asList(5, 4, 3, 2, 1)); assertEquals("5", result.toString()); final Integer resultObj = new $<Integer>(asList(5, 4, 3, 2, 1)).firstOrNull(); assertEquals("5", resultObj.toString()); assertNull($.firstOrNull(Collections.emptyList())); assertNull(new $<Integer>(Collections.<Integer>emptyList()).firstOrNull()); final int resultPred = $.firstOrNull(asList(5, 4, 3, 2, 1), new Predicate<Integer>() { public Boolean apply(Integer item) { return item % 2 == 0; } }); assertEquals(4, resultPred); assertNull($.firstOrNull(Collections.<Integer>emptyList(), new Predicate<Integer>() { public Boolean apply(Integer item) { return item % 2 == 0; } })); final int resultPredObj = new $<Integer>(asList(5, 4, 3, 2, 1)).firstOrNull(new Predicate<Integer>() { public Boolean apply(Integer item) { return item % 2 == 0; } }); assertEquals(4, resultPredObj); assertNull(new $<Integer>(Collections.<Integer>emptyList()).firstOrNull(new Predicate<Integer>() { public Boolean apply(Integer item) { return item % 2 == 0; } })); } #location 20 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code public static <T extends Number> double mean(final Iterable<T> iterable) { T result = null; int count = 0; for (final T item : iterable) { result = add(result, item); count += 1; } if (result == null) { return 0d; } return result.doubleValue() / count; }
#vulnerable code public static <T extends Number> double mean(final Iterable<T> iterable) { T result = null; int count = 0; for (final T item : iterable) { result = add(result, item); count += 1; } return result.doubleValue() / count; } #location 8 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
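Here the method's primitive double return type cannot express "no value", so the patch returns 0d for an empty iterable instead of dereferencing a null result. Whether 0, NaN, or an exception is the right sentinel is a design choice; for contrast, a sketch of the NaN alternative (hypothetical, not the library's actual behavior):

import java.util.Arrays;
import java.util.Collections;

public class MeanSketch {
    static double mean(Iterable<? extends Number> iterable) {
        double total = 0;
        int count = 0;
        for (Number n : iterable) {
            total += n.doubleValue();
            count++;
        }
        // NaN makes the empty-input case visible to callers, whereas 0
        // silently looks like a legitimate mean of real data.
        return count == 0 ? Double.NaN : total / count;
    }

    public static void main(String[] args) {
        System.out.println(mean(Arrays.asList(1, 2, 3)));            // 2.0
        System.out.println(mean(Collections.<Integer>emptyList())); // NaN
    }
}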
#fixed code @Test public void detect() { final Optional<Integer> result = _.detect(asList(1, 2, 3, 4, 5, 6), new Predicate<Integer>() { public Boolean apply(Integer item) { return item % 2 == 0; } }); assertEquals("Optional.of(2)", result.toString()); }
#vulnerable code @Test public void detect() { final Integer result = _.detect(asList(1, 2, 3, 4, 5, 6), new Predicate<Integer>() { public Boolean apply(Integer item) { return item % 2 == 0; } }); assertEquals("2", result.toString()); } #location 9 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Test public void testLoadingLocalTime() throws Exception { Vertex v = this.sqlgGraph.addVertex(T.label, "Person", "createOn", LocalTime.now()); this.sqlgGraph.tx().commit(); this.sqlgGraph.close(); //noinspection Duplicates try (SqlgGraph sqlgGraph1 = SqlgGraph.open(configuration)) { Vertex vv = sqlgGraph1.traversal().V(v.id()).next(); Assert.assertTrue(vv.property("createOn").isPresent()); Map<String, PropertyType> propertyTypeMap = sqlgGraph1.getTopology().getAllTables().get(SchemaTable.of( sqlgGraph1.getSqlDialect().getPublicSchema(), "V_Person").toString()); Assert.assertTrue(propertyTypeMap.containsKey("createOn")); sqlgGraph1.tx().rollback(); } }
#vulnerable code @Test public void testLoadingLocalTime() throws Exception { Vertex v = this.sqlgGraph.addVertex(T.label, "Person", "createOn", LocalTime.now()); this.sqlgGraph.tx().commit(); this.sqlgGraph.close(); //noinspection Duplicates try (SqlgGraph sqlgGraph1 = SqlgGraph.open(configuration)) { Vertex vv = sqlgGraph1.traversal().V(v.id()).next(); assertTrue(vv.property("createOn").isPresent()); Map<String, PropertyType> propertyTypeMap = sqlgGraph1.getTopology().getAllTables().get(SchemaTable.of( sqlgGraph1.getSqlDialect().getPublicSchema(), "V_Person").toString()); assertTrue(propertyTypeMap.containsKey("createOn")); sqlgGraph1.tx().rollback(); } } #location 10 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Test public void testCreateEdgeBetweenVerticesPropertiesEagerlyLoadedOnHasHas() { Vertex person1 = this.sqlgGraph.addVertex(T.label, "Person", "name", "john"); Vertex person2 = this.sqlgGraph.addVertex(T.label, "Person", "name", "peter"); this.sqlgGraph.tx().commit(); person1 = this.sqlgGraph.traversal().V(person1.id()).next(); person2 = this.sqlgGraph.traversal().V(person2.id()).next(); person1.addEdge("friend", person2); Assert.assertEquals("john", person1.value("name")); Assert.assertEquals("peter", person2.value("name")); this.sqlgGraph.tx().commit(); List<Vertex> vertices = this.sqlgGraph.traversal().V().<Vertex>has(T.label, "Person").<Vertex>has("name", "john").toList(); Assert.assertEquals(1, vertexTraversal(vertices.get(0)).out("friend").count().next().intValue()); Assert.assertEquals(1, vertices.size()); vertices = this.sqlgGraph.traversal().V().<Vertex>has(T.label, "Person").<Vertex>has("name", "peter").toList(); Assert.assertEquals(1, vertexTraversal(vertices.get(0)).in("friend").count().next().intValue()); Assert.assertEquals(1, vertices.size()); }
#vulnerable code @Test public void testCreateEdgeBetweenVerticesPropertiesEagerlyLoadedOnHasHas() { Vertex person1 = this.sqlgGraph.addVertex(T.label, "Person", "name", "john"); Vertex person2 = this.sqlgGraph.addVertex(T.label, "Person", "name", "peter"); this.sqlgGraph.tx().commit(); person1 = this.sqlgGraph.v(person1.id()); person2 = this.sqlgGraph.v(person2.id()); person1.addEdge("friend", person2); Assert.assertEquals("john", person1.value("name")); Assert.assertEquals("peter", person2.value("name")); this.sqlgGraph.tx().commit(); List<Vertex> vertices = this.sqlgGraph.traversal().V().<Vertex>has(T.label, "Person").<Vertex>has("name", "john").toList(); Assert.assertEquals(1, vertexTraversal(vertices.get(0)).out("friend").count().next().intValue()); Assert.assertEquals(1, vertices.size()); vertices = this.sqlgGraph.traversal().V().<Vertex>has(T.label, "Person").<Vertex>has("name", "peter").toList(); Assert.assertEquals(1, vertexTraversal(vertices.get(0)).in("friend").count().next().intValue()); Assert.assertEquals(1, vertices.size()); } #location 8 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
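This entry and many of those that follow share a single pattern: the deprecated Sqlg lookup graph.v(id) returns null when the element does not exist, so every chained call on its result is a latent null dereference, while graph.traversal().V(id).next() fails fast with NoSuchElementException at the lookup itself. A minimal illustration of the difference, using a plain map as a hypothetical stand-in for the graph:

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.NoSuchElementException;

public class LookupSketch {
    // Null-returning lookup, like the deprecated graph.v(id).
    static Map<String, Object> findOrNull(Map<Long, Map<String, Object>> store, long id) {
        return store.get(id); // null when absent
    }

    // Fail-fast lookup, like traversal().V(id).next() on an empty traversal.
    static Map<String, Object> findOrThrow(Map<Long, Map<String, Object>> store, long id) {
        Map<String, Object> v = store.get(id);
        if (v == null) {
            throw new NoSuchElementException("no vertex with id " + id);
        }
        return v;
    }

    public static void main(String[] args) {
        Map<Long, Map<String, Object>> store = new HashMap<>();
        store.put(1L, Collections.singletonMap("name", "john"));

        System.out.println(findOrThrow(store, 1L).get("name")); // john

        // findOrNull(store, 2L).get("name") would blow up one call later with
        // an anonymous NPE; findOrThrow fails here, at the lookup, with a message.
        try {
            findOrThrow(store, 2L);
        } catch (NoSuchElementException e) {
            System.out.println(e.getMessage());
        }
    }
}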
#fixed code @Test public void testBatchUpdatePersistentVertices() { Vertex v1 = this.sqlgGraph.addVertex(T.label, "Person", "name", "a"); Vertex v2 = this.sqlgGraph.addVertex(T.label, "Person", "surname", "b"); this.sqlgGraph.tx().commit(); assertEquals("a", this.sqlgGraph.traversal().V(v1.id()).next().value("name")); assertEquals("b", this.sqlgGraph.traversal().V(v2.id()).next().value("surname")); this.sqlgGraph.tx().rollback(); this.sqlgGraph.tx().normalBatchModeOn(); v1.property("name", "aa"); v2.property("surname", "bb"); this.sqlgGraph.tx().commit(); assertEquals("aa", this.sqlgGraph.traversal().V(v1.id()).next().value("name")); assertEquals("bb", this.sqlgGraph.traversal().V(v2.id()).next().value("surname")); }
#vulnerable code @Test public void testBatchUpdatePersistentVertices() { Vertex v1 = this.sqlgGraph.addVertex(T.label, "Person", "name", "a"); Vertex v2 = this.sqlgGraph.addVertex(T.label, "Person", "surname", "b"); this.sqlgGraph.tx().commit(); assertEquals("a", this.sqlgGraph.v(v1.id()).value("name")); assertEquals("b", this.sqlgGraph.v(v2.id()).value("surname")); this.sqlgGraph.tx().rollback(); this.sqlgGraph.tx().normalBatchModeOn(); v1.property("name", "aa"); v2.property("surname", "bb"); this.sqlgGraph.tx().commit(); assertEquals("aa", this.sqlgGraph.v(v1.id()).value("name")); assertEquals("bb", this.sqlgGraph.v(v2.id()).value("surname")); } #location 6 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Test public void testBatchUpdatePersistentVerticesAllTypes() { Assume.assumeTrue(this.sqlgGraph.features().vertex().properties().supportsFloatValues()); Vertex v1 = this.sqlgGraph.addVertex(T.label, "Person", "name", "a"); Vertex v2 = this.sqlgGraph.addVertex(T.label, "Person", "surname", "b"); this.sqlgGraph.tx().commit(); assertEquals("a", this.sqlgGraph.traversal().V(v1.id()).next().value("name")); assertEquals("b", this.sqlgGraph.traversal().V(v2.id()).next().value("surname")); this.sqlgGraph.tx().rollback(); this.sqlgGraph.tx().normalBatchModeOn(); v1.property("name", "aa"); v1.property("boolean", true); v1.property("short", (short) 1); v1.property("integer", 1); v1.property("long", 1L); v1.property("float", 1F); v1.property("double", 1D); v2.property("surname", "bb"); v2.property("boolean", false); v2.property("short", (short) 2); v2.property("integer", 2); v2.property("long", 2L); v2.property("float", 2F); v2.property("double", 2D); this.sqlgGraph.tx().commit(); assertEquals("aa", this.sqlgGraph.traversal().V(v1.id()).next().value("name")); assertEquals(true, this.sqlgGraph.traversal().V(v1.id()).next().value("boolean")); assertEquals((short) 1, this.sqlgGraph.traversal().V(v1.id()).next().<Short>value("short").shortValue()); assertEquals(1, this.sqlgGraph.traversal().V(v1.id()).next().<Integer>value("integer").intValue()); assertEquals(1L, this.sqlgGraph.traversal().V(v1.id()).next().<Long>value("long").longValue(), 0); assertEquals(1F, this.sqlgGraph.traversal().V(v1.id()).next().<Float>value("float").floatValue(), 0); assertEquals(1D, this.sqlgGraph.traversal().V(v1.id()).next().<Double>value("double").doubleValue(), 0); assertEquals("bb", this.sqlgGraph.traversal().V(v2.id()).next().value("surname")); assertEquals(false, this.sqlgGraph.traversal().V(v2.id()).next().value("boolean")); assertEquals((short) 2, this.sqlgGraph.traversal().V(v2.id()).next().<Short>value("short").shortValue()); assertEquals(2, this.sqlgGraph.traversal().V(v2.id()).next().<Integer>value("integer").intValue()); assertEquals(2L, this.sqlgGraph.traversal().V(v2.id()).next().<Long>value("long").longValue(), 0); assertEquals(2F, this.sqlgGraph.traversal().V(v2.id()).next().<Float>value("float").floatValue(), 0); assertEquals(2D, this.sqlgGraph.traversal().V(v2.id()).next().<Double>value("double").doubleValue(), 0); }
#vulnerable code @Test public void testBatchUpdatePersistentVerticesAllTypes() { Assume.assumeTrue(this.sqlgGraph.features().vertex().properties().supportsFloatValues()); Vertex v1 = this.sqlgGraph.addVertex(T.label, "Person", "name", "a"); Vertex v2 = this.sqlgGraph.addVertex(T.label, "Person", "surname", "b"); this.sqlgGraph.tx().commit(); assertEquals("a", this.sqlgGraph.v(v1.id()).value("name")); assertEquals("b", this.sqlgGraph.v(v2.id()).value("surname")); this.sqlgGraph.tx().rollback(); this.sqlgGraph.tx().normalBatchModeOn(); v1.property("name", "aa"); v1.property("boolean", true); v1.property("short", (short) 1); v1.property("integer", 1); v1.property("long", 1L); v1.property("float", 1F); v1.property("double", 1D); v2.property("surname", "bb"); v2.property("boolean", false); v2.property("short", (short) 2); v2.property("integer", 2); v2.property("long", 2L); v2.property("float", 2F); v2.property("double", 2D); this.sqlgGraph.tx().commit(); assertEquals("aa", this.sqlgGraph.v(v1.id()).value("name")); assertEquals(true, this.sqlgGraph.v(v1.id()).value("boolean")); assertEquals((short) 1, this.sqlgGraph.v(v1.id()).<Short>value("short").shortValue()); assertEquals(1, this.sqlgGraph.v(v1.id()).<Integer>value("integer").intValue()); assertEquals(1L, this.sqlgGraph.v(v1.id()).<Long>value("long").longValue(), 0); assertEquals(1F, this.sqlgGraph.v(v1.id()).<Float>value("float").floatValue(), 0); assertEquals(1D, this.sqlgGraph.v(v1.id()).<Double>value("double").doubleValue(), 0); assertEquals("bb", this.sqlgGraph.v(v2.id()).value("surname")); assertEquals(false, this.sqlgGraph.v(v2.id()).value("boolean")); assertEquals((short) 2, this.sqlgGraph.v(v2.id()).<Short>value("short").shortValue()); assertEquals(2, this.sqlgGraph.v(v2.id()).<Integer>value("integer").intValue()); assertEquals(2L, this.sqlgGraph.v(v2.id()).<Long>value("long").longValue(), 0); assertEquals(2F, this.sqlgGraph.v(v2.id()).<Float>value("float").floatValue(), 0); assertEquals(2D, this.sqlgGraph.v(v2.id()).<Double>value("double").doubleValue(), 0); } #location 9 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Test public void testLoadVertexProperties() { Vertex marko = this.sqlgGraph.addVertex(T.label, "Person", "name", "marko"); this.sqlgGraph.tx().commit(); marko = this.sqlgGraph.traversal().V(marko.id()).next(); Assert.assertEquals("marko", marko.property("name").value()); }
#vulnerable code @Test public void testLoadVertexProperties() { Vertex marko = this.sqlgGraph.addVertex(T.label, "Person", "name", "marko"); this.sqlgGraph.tx().commit(); marko = this.sqlgGraph.v(marko.id()); Assert.assertEquals("marko", marko.property("name").value()); } #location 6 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Test public void testCreateEdgeBetweenVerticesPropertiesEagerlyLoadedOnHas() { Vertex person1 = this.sqlgGraph.addVertex(T.label, "Person", "name", "john"); Vertex person2 = this.sqlgGraph.addVertex(T.label, "Person", "name", "peter"); this.sqlgGraph.tx().commit(); person1 = this.sqlgGraph.traversal().V(person1.id()).next(); person2 = this.sqlgGraph.traversal().V(person2.id()).next(); person1.addEdge("friend", person2); Assert.assertEquals("john", person1.value("name")); Assert.assertEquals("peter", person2.value("name")); this.sqlgGraph.tx().commit(); List<Vertex> vertices = this.sqlgGraph.traversal().V().<Vertex>has(T.label, "Person").toList(); Assert.assertEquals(1, vertexTraversal(vertices.get(0)).out("friend").count().next().intValue()); Assert.assertEquals(1, vertexTraversal(vertices.get(1)).in("friend").count().next().intValue()); Assert.assertEquals(2, vertices.size()); }
#vulnerable code @Test public void testCreateEdgeBetweenVerticesPropertiesEagerlyLoadedOnHas() { Vertex person1 = this.sqlgGraph.addVertex(T.label, "Person", "name", "john"); Vertex person2 = this.sqlgGraph.addVertex(T.label, "Person", "name", "peter"); this.sqlgGraph.tx().commit(); person1 = this.sqlgGraph.v(person1.id()); person2 = this.sqlgGraph.v(person2.id()); person1.addEdge("friend", person2); Assert.assertEquals("john", person1.value("name")); Assert.assertEquals("peter", person2.value("name")); this.sqlgGraph.tx().commit(); List<Vertex> vertices = this.sqlgGraph.traversal().V().<Vertex>has(T.label, "Person").toList(); Assert.assertEquals(1, vertexTraversal(vertices.get(0)).out("friend").count().next().intValue()); Assert.assertEquals(1, vertexTraversal(vertices.get(1)).in("friend").count().next().intValue()); Assert.assertEquals(2, vertices.size()); } #location 10 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code private void loadResultSet(ResultSet resultSet, List<VertexLabel> inForeignKeys, List<VertexLabel> outForeignKeys) throws SQLException { SchemaTable inVertexColumnName = null; SchemaTable outVertexColumnName = null; ResultSetMetaData resultSetMetaData = resultSet.getMetaData(); for (int i = 1; i <= resultSetMetaData.getColumnCount(); i++) { String columnName = resultSetMetaData.getColumnLabel(i); if (!columnName.equals("ID") && !columnName.endsWith(Topology.OUT_VERTEX_COLUMN_END) && !columnName.endsWith(Topology.IN_VERTEX_COLUMN_END)) { loadProperty(resultSet, columnName, i); } } long inId = -1; List<Comparable> inComparables = new ArrayList<>(); for (VertexLabel inVertexLabel: inForeignKeys) { inVertexColumnName = SchemaTable.of(inVertexLabel.getSchema().getName(), inVertexLabel.getLabel()); if (inVertexLabel.hasIDPrimaryKey()) { String foreignKey = inVertexLabel.getSchema().getName() + "." + inVertexLabel.getName() + Topology.IN_VERTEX_COLUMN_END; inId = resultSet.getLong(foreignKey); if (!resultSet.wasNull()) { break; } } else { for (String identifier : inVertexLabel.getIdentifiers()) { PropertyColumn propertyColumn = inVertexLabel.getProperty(identifier).orElseThrow( () -> new IllegalStateException(String.format("identifier %s column must be a property", identifier)) ); PropertyType propertyType = propertyColumn.getPropertyType(); String[] propertyTypeToSqlDefinition = this.sqlgGraph.getSqlDialect().propertyTypeToSqlDefinition(propertyType); int count = 1; for (String ignored : propertyTypeToSqlDefinition) { if (count > 1) { inComparables.add((Comparable)resultSet.getObject(inVertexLabel.getFullName() + "." + identifier + propertyType.getPostFixes()[count - 2] + Topology.IN_VERTEX_COLUMN_END)); } else { //The first column existVertexLabel no postfix inComparables.add((Comparable)resultSet.getObject(inVertexLabel.getFullName() + "." + identifier + Topology.IN_VERTEX_COLUMN_END)); } count++; } } } } long outId = -1; List<Comparable> outComparables = new ArrayList<>(); for (VertexLabel outVertexLabel: outForeignKeys) { outVertexColumnName = SchemaTable.of(outVertexLabel.getSchema().getName(), outVertexLabel.getLabel()); if (outVertexLabel.hasIDPrimaryKey()) { String foreignKey = outVertexLabel.getSchema().getName() + "." + outVertexLabel.getName() + Topology.OUT_VERTEX_COLUMN_END; outId = resultSet.getLong(foreignKey); if (!resultSet.wasNull()) { break; } } else { for (String identifier : outVertexLabel.getIdentifiers()) { PropertyColumn propertyColumn = outVertexLabel.getProperty(identifier).orElseThrow( () -> new IllegalStateException(String.format("identifier %s column must be a property", identifier)) ); PropertyType propertyType = propertyColumn.getPropertyType(); String[] propertyTypeToSqlDefinition = this.sqlgGraph.getSqlDialect().propertyTypeToSqlDefinition(propertyType); int count = 1; for (String ignored : propertyTypeToSqlDefinition) { if (count > 1) { outComparables.add((Comparable)resultSet.getObject(outVertexLabel.getFullName() + "." + identifier + propertyType.getPostFixes()[count - 2] + Topology.OUT_VERTEX_COLUMN_END)); } else { //The first column existVertexLabel no postfix outComparables.add((Comparable)resultSet.getObject(outVertexLabel.getFullName() + "." + identifier + Topology.OUT_VERTEX_COLUMN_END)); } count++; } } } } if (inId != -1) { this.inVertex = SqlgVertex.of(this.sqlgGraph, inId, inVertexColumnName.getSchema(), SqlgUtil.removeTrailingInId(inVertexColumnName.getTable())); } else { Preconditions.checkState(!inComparables.isEmpty(), "The in ids are not found for the edge!"); this.inVertex = SqlgVertex.of(this.sqlgGraph, inComparables, inVertexColumnName.getSchema(), SqlgUtil.removeTrailingInId(inVertexColumnName.getTable())); } if (outId != -1) { this.outVertex = SqlgVertex.of(this.sqlgGraph, outId, outVertexColumnName.getSchema(), SqlgUtil.removeTrailingOutId(outVertexColumnName.getTable())); } else { Preconditions.checkState(!outComparables.isEmpty(), "The out ids are not found for the edge!"); this.outVertex = SqlgVertex.of(this.sqlgGraph, outComparables, outVertexColumnName.getSchema(), SqlgUtil.removeTrailingOutId(outVertexColumnName.getTable())); } }
#vulnerable code private void loadResultSet(ResultSet resultSet, List<VertexLabel> inForeignKeys, List<VertexLabel> outForeignKeys) throws SQLException { SchemaTable inVertexColumnName = null; SchemaTable outVertexColumnName = null; ResultSetMetaData resultSetMetaData = resultSet.getMetaData(); for (int i = 1; i <= resultSetMetaData.getColumnCount(); i++) { String columnName = resultSetMetaData.getColumnLabel(i); if (!columnName.equals("ID") && !columnName.endsWith(Topology.OUT_VERTEX_COLUMN_END) && !columnName.endsWith(Topology.IN_VERTEX_COLUMN_END)) { loadProperty(resultSet, columnName, i); } } long inId = -1; ListOrderedSet<Comparable> inComparables = new ListOrderedSet<>(); for (VertexLabel inVertexLabel: inForeignKeys) { inVertexColumnName = SchemaTable.of(inVertexLabel.getSchema().getName(), inVertexLabel.getLabel()); if (inVertexLabel.hasIDPrimaryKey()) { String foreignKey = inVertexLabel.getSchema().getName() + "." + inVertexLabel.getName() + Topology.IN_VERTEX_COLUMN_END; inId = resultSet.getLong(foreignKey); if (!resultSet.wasNull()) { break; } } else { for (String identifier : inVertexLabel.getIdentifiers()) { PropertyColumn propertyColumn = inVertexLabel.getProperty(identifier).orElseThrow( () -> new IllegalStateException(String.format("identifier %s column must be a property", identifier)) ); PropertyType propertyType = propertyColumn.getPropertyType(); String[] propertyTypeToSqlDefinition = this.sqlgGraph.getSqlDialect().propertyTypeToSqlDefinition(propertyType); int count = 1; for (String ignored : propertyTypeToSqlDefinition) { if (count > 1) { inComparables.add((Comparable)resultSet.getObject(inVertexLabel.getFullName() + "." + identifier + propertyType.getPostFixes()[count - 2] + Topology.IN_VERTEX_COLUMN_END)); } else { //The first column existVertexLabel no postfix inComparables.add((Comparable)resultSet.getObject(inVertexLabel.getFullName() + "." + identifier + Topology.IN_VERTEX_COLUMN_END)); } count++; } } } } long outId = -1; ListOrderedSet<Comparable> outComparables = new ListOrderedSet<>(); for (VertexLabel outVertexLabel: outForeignKeys) { outVertexColumnName = SchemaTable.of(outVertexLabel.getSchema().getName(), outVertexLabel.getLabel()); if (outVertexLabel.hasIDPrimaryKey()) { String foreignKey = outVertexLabel.getSchema().getName() + "." + outVertexLabel.getName() + Topology.OUT_VERTEX_COLUMN_END; outId = resultSet.getLong(foreignKey); if (!resultSet.wasNull()) { break; } } else { for (String identifier : outVertexLabel.getIdentifiers()) { PropertyColumn propertyColumn = outVertexLabel.getProperty(identifier).orElseThrow( () -> new IllegalStateException(String.format("identifier %s column must be a property", identifier)) ); PropertyType propertyType = propertyColumn.getPropertyType(); String[] propertyTypeToSqlDefinition = this.sqlgGraph.getSqlDialect().propertyTypeToSqlDefinition(propertyType); int count = 1; for (String ignored : propertyTypeToSqlDefinition) { if (count > 1) { outComparables.add((Comparable)resultSet.getObject(outVertexLabel.getFullName() + "." + identifier + propertyType.getPostFixes()[count - 2] + Topology.OUT_VERTEX_COLUMN_END)); } else { //The first column existVertexLabel no postfix outComparables.add((Comparable)resultSet.getObject(outVertexLabel.getFullName() + "." + identifier + Topology.OUT_VERTEX_COLUMN_END)); } count++; } } } } if (inId != -1) { this.inVertex = SqlgVertex.of(this.sqlgGraph, inId, inVertexColumnName.getSchema(), SqlgUtil.removeTrailingInId(inVertexColumnName.getTable())); } else { Preconditions.checkState(!inComparables.isEmpty(), "The in ids are not found for the edge!"); this.inVertex = SqlgVertex.of(this.sqlgGraph, inComparables, inVertexColumnName.getSchema(), SqlgUtil.removeTrailingInId(inVertexColumnName.getTable())); } if (outId != -1) { this.outVertex = SqlgVertex.of(this.sqlgGraph, outId, outVertexColumnName.getSchema(), SqlgUtil.removeTrailingOutId(outVertexColumnName.getTable())); } else { Preconditions.checkState(!outComparables.isEmpty(), "The out ids are not found for the edge!"); this.outVertex = SqlgVertex.of(this.sqlgGraph, outComparables, outVertexColumnName.getSchema(), SqlgUtil.removeTrailingOutId(outVertexColumnName.getTable())); } } #location 80 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
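Both versions above dereference inVertexColumnName and outVertexColumnName, locals that start as null and are only assigned inside the foreign-key loops; that loop-assigned-local shape is a classic trigger for NULL_DEREFERENCE reports when the analyzer cannot prove the loop runs. A compact illustration of the pattern and its guard, unrelated to Sqlg's actual schema handling:

import java.util.Arrays;
import java.util.List;
import java.util.Locale;

public class LoopAssignedSketch {
    static String lastLabel(List<String> labels) {
        String last = null;
        for (String label : labels) {
            last = label; // only assigned if the loop body runs at least once
        }
        // Without this guard, last.toUpperCase() on an empty list is an NPE,
        // the same shape as the finding flagged above.
        if (last == null) {
            throw new IllegalStateException("no labels supplied");
        }
        return last.toUpperCase(Locale.ROOT);
    }

    public static void main(String[] args) {
        System.out.println(lastLabel(Arrays.asList("person", "friend"))); // FRIEND
    }
}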
#fixed code @Test public void testBatchUpdatePersistentVertices() { Vertex v1 = this.sqlgGraph.addVertex(T.label, "Person", "name", "a"); Vertex v2 = this.sqlgGraph.addVertex(T.label, "Person", "surname", "b"); this.sqlgGraph.tx().commit(); assertEquals("a", this.sqlgGraph.traversal().V(v1.id()).next().value("name")); assertEquals("b", this.sqlgGraph.traversal().V(v2.id()).next().value("surname")); this.sqlgGraph.tx().rollback(); this.sqlgGraph.tx().normalBatchModeOn(); v1.property("name", "aa"); v2.property("surname", "bb"); this.sqlgGraph.tx().commit(); assertEquals("aa", this.sqlgGraph.traversal().V(v1.id()).next().value("name")); assertEquals("bb", this.sqlgGraph.traversal().V(v2.id()).next().value("surname")); }
#vulnerable code @Test public void testBatchUpdatePersistentVertices() { Vertex v1 = this.sqlgGraph.addVertex(T.label, "Person", "name", "a"); Vertex v2 = this.sqlgGraph.addVertex(T.label, "Person", "surname", "b"); this.sqlgGraph.tx().commit(); assertEquals("a", this.sqlgGraph.v(v1.id()).value("name")); assertEquals("b", this.sqlgGraph.v(v2.id()).value("surname")); this.sqlgGraph.tx().rollback(); this.sqlgGraph.tx().normalBatchModeOn(); v1.property("name", "aa"); v2.property("surname", "bb"); this.sqlgGraph.tx().commit(); assertEquals("aa", this.sqlgGraph.v(v1.id()).value("name")); assertEquals("bb", this.sqlgGraph.v(v2.id()).value("surname")); } #location 7 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Test public void testCreateEdgeBetweenVerticesPropertiesEagerlyLoadedOnHas() { Vertex person1 = this.sqlgGraph.addVertex(T.label, "Person", "name", "john"); Vertex person2 = this.sqlgGraph.addVertex(T.label, "Person", "name", "peter"); this.sqlgGraph.tx().commit(); person1 = this.sqlgGraph.traversal().V(person1.id()).next(); person2 = this.sqlgGraph.traversal().V(person2.id()).next(); person1.addEdge("friend", person2); Assert.assertEquals("john", person1.value("name")); Assert.assertEquals("peter", person2.value("name")); this.sqlgGraph.tx().commit(); List<Vertex> vertices = this.sqlgGraph.traversal().V().<Vertex>has(T.label, "Person").toList(); Assert.assertEquals(1, vertexTraversal(vertices.get(0)).out("friend").count().next().intValue()); Assert.assertEquals(1, vertexTraversal(vertices.get(1)).in("friend").count().next().intValue()); Assert.assertEquals(2, vertices.size()); }
#vulnerable code @Test public void testCreateEdgeBetweenVerticesPropertiesEagerlyLoadedOnHas() { Vertex person1 = this.sqlgGraph.addVertex(T.label, "Person", "name", "john"); Vertex person2 = this.sqlgGraph.addVertex(T.label, "Person", "name", "peter"); this.sqlgGraph.tx().commit(); person1 = this.sqlgGraph.v(person1.id()); person2 = this.sqlgGraph.v(person2.id()); person1.addEdge("friend", person2); Assert.assertEquals("john", person1.value("name")); Assert.assertEquals("peter", person2.value("name")); this.sqlgGraph.tx().commit(); List<Vertex> vertices = this.sqlgGraph.traversal().V().<Vertex>has(T.label, "Person").toList(); Assert.assertEquals(1, vertexTraversal(vertices.get(0)).out("friend").count().next().intValue()); Assert.assertEquals(1, vertexTraversal(vertices.get(1)).in("friend").count().next().intValue()); Assert.assertEquals(2, vertices.size()); } #location 8 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Test public void testMultipleReferencesToSameVertex2Instances() { Vertex v1 = this.sqlgGraph.addVertex(T.label, "Person", "name", "john"); this.sqlgGraph.tx().commit(); //_v1 is in the transaction cache //v1 is not Vertex _v1 = this.sqlgGraph.traversal().V(v1.id()).next(); Assert.assertEquals("john", v1.value("name")); Assert.assertEquals("john", _v1.value("name")); v1.property("name", "john1"); Assert.assertEquals("john1", v1.value("name")); Assert.assertEquals("john1", _v1.value("name")); }
#vulnerable code @Test public void testMultipleReferencesToSameVertex2Instances() { Vertex v1 = this.sqlgGraph.addVertex(T.label, "Person", "name", "john"); this.sqlgGraph.tx().commit(); //_v1 is in the transaction cache //v1 is not Vertex _v1 = this.sqlgGraph.v(v1.id()); Assert.assertEquals("john", v1.value("name")); Assert.assertEquals("john", _v1.value("name")); v1.property("name", "john1"); Assert.assertEquals("john1", v1.value("name")); Assert.assertEquals("john1", _v1.value("name")); } #location 9 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Test public void testCreateEdgeBetweenVerticesPropertiesEagerlyLoadedOnHasHas() { Vertex person1 = this.sqlgGraph.addVertex(T.label, "Person", "name", "john"); Vertex person2 = this.sqlgGraph.addVertex(T.label, "Person", "name", "peter"); this.sqlgGraph.tx().commit(); person1 = this.sqlgGraph.traversal().V(person1.id()).next(); person2 = this.sqlgGraph.traversal().V(person2.id()).next(); person1.addEdge("friend", person2); Assert.assertEquals("john", person1.value("name")); Assert.assertEquals("peter", person2.value("name")); this.sqlgGraph.tx().commit(); List<Vertex> vertices = this.sqlgGraph.traversal().V().<Vertex>has(T.label, "Person").<Vertex>has("name", "john").toList(); Assert.assertEquals(1, vertexTraversal(vertices.get(0)).out("friend").count().next().intValue()); Assert.assertEquals(1, vertices.size()); vertices = this.sqlgGraph.traversal().V().<Vertex>has(T.label, "Person").<Vertex>has("name", "peter").toList(); Assert.assertEquals(1, vertexTraversal(vertices.get(0)).in("friend").count().next().intValue()); Assert.assertEquals(1, vertices.size()); }
#vulnerable code @Test public void testCreateEdgeBetweenVerticesPropertiesEagerlyLoadedOnHasHas() { Vertex person1 = this.sqlgGraph.addVertex(T.label, "Person", "name", "john"); Vertex person2 = this.sqlgGraph.addVertex(T.label, "Person", "name", "peter"); this.sqlgGraph.tx().commit(); person1 = this.sqlgGraph.v(person1.id()); person2 = this.sqlgGraph.v(person2.id()); person1.addEdge("friend", person2); Assert.assertEquals("john", person1.value("name")); Assert.assertEquals("peter", person2.value("name")); this.sqlgGraph.tx().commit(); List<Vertex> vertices = this.sqlgGraph.traversal().V().<Vertex>has(T.label, "Person").<Vertex>has("name", "john").toList(); Assert.assertEquals(1, vertexTraversal(vertices.get(0)).out("friend").count().next().intValue()); Assert.assertEquals(1, vertices.size()); vertices = this.sqlgGraph.traversal().V().<Vertex>has(T.label, "Person").<Vertex>has("name", "peter").toList(); Assert.assertEquals(1, vertexTraversal(vertices.get(0)).in("friend").count().next().intValue()); Assert.assertEquals(1, vertices.size()); } #location 10 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @BeforeClass public static void beforeClass() throws Exception { URL sqlProperties = Thread.currentThread().getContextClassLoader().getResource("sqlg.properties"); configuration = new PropertiesConfiguration(sqlProperties); if (!configuration.containsKey("jdbc.url")) { throw new IllegalArgumentException(String.format("SqlGraph configuration requires that the %s be set", "jdbc.url")); } String url = configuration.getString("jdbc.url"); //obtain the connection that we will later supply from JNDI ds = new C3p0DataSourceFactory().setup(url, configuration).getDatasource(); //change the connection url to be a JNDI one configuration.setProperty("jdbc.url", "jndi:testConnection"); //set up the initial context NamingManager.setInitialContextFactoryBuilder(environment -> { InitialContextFactory mockFactory = mock(InitialContextFactory.class); Context mockContext = mock(Context.class); when(mockFactory.getInitialContext(any())).thenReturn(mockContext); when(mockContext.lookup("testConnection")).thenReturn(ds); return mockFactory; }); }
#vulnerable code @BeforeClass public static void beforeClass() throws ClassNotFoundException, IOException, PropertyVetoException, NamingException, ConfigurationException { URL sqlProperties = Thread.currentThread().getContextClassLoader().getResource("sqlg.properties"); configuration = new PropertiesConfiguration(sqlProperties); if (!configuration.containsKey("jdbc.url")) { throw new IllegalArgumentException(String.format("SqlGraph configuration requires that the %s be set", "jdbc.url")); } String url = configuration.getString("jdbc.url"); //obtain the connection that we will later supply from JNDI SqlgGraph g = SqlgGraph.open(configuration); ds = g.getSqlgDataSource().get(url); // g.getTopology().close(); //change the connection url to be a JNDI one configuration.setProperty("jdbc.url", "jndi:testConnection"); //set up the initial context NamingManager.setInitialContextFactoryBuilder(environment -> { InitialContextFactory mockFactory = mock(InitialContextFactory.class); Context mockContext = mock(Context.class); when(mockFactory.getInitialContext(any())).thenReturn(mockContext); when(mockContext.lookup("testConnection")).thenReturn(ds); return mockFactory; }); } #location 13 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Test public void testLoadingJson() throws Exception { Assume.assumeTrue(this.sqlgGraph.getSqlDialect().supportsJson()); ObjectMapper objectMapper = new ObjectMapper(); ObjectNode json = new ObjectNode(objectMapper.getNodeFactory()); json.put("username", "john"); Vertex v1 = this.sqlgGraph.addVertex(T.label, "Person", "doc", json); this.sqlgGraph.tx().commit(); this.sqlgGraph.close(); try (SqlgGraph sqlgGraph1 = SqlgGraph.open(configuration)) { Vertex vv = sqlgGraph1.traversal().V(v1.id()).next(); Assert.assertTrue(vv.property("doc").isPresent()); Map<String, PropertyType> propertyTypeMap = sqlgGraph1.getTopology().getAllTables().get(SchemaTable.of( sqlgGraph1.getSqlDialect().getPublicSchema(), "V_Person").toString()); Assert.assertTrue(propertyTypeMap.containsKey("doc")); sqlgGraph1.tx().rollback(); } }
#vulnerable code @Test public void testLoadingJson() throws Exception { Assume.assumeTrue(this.sqlgGraph.getSqlDialect().supportsJson()); ObjectMapper objectMapper = new ObjectMapper(); ObjectNode json = new ObjectNode(objectMapper.getNodeFactory()); json.put("username", "john"); Vertex v1 = this.sqlgGraph.addVertex(T.label, "Person", "doc", json); this.sqlgGraph.tx().commit(); this.sqlgGraph.close(); try (SqlgGraph sqlgGraph1 = SqlgGraph.open(configuration)) { Vertex vv = sqlgGraph1.traversal().V(v1.id()).next(); assertTrue(vv.property("doc").isPresent()); Map<String, PropertyType> propertyTypeMap = sqlgGraph1.getTopology().getAllTables().get(SchemaTable.of( sqlgGraph1.getSqlDialect().getPublicSchema(), "V_Person").toString()); assertTrue(propertyTypeMap.containsKey("doc")); sqlgGraph1.tx().rollback(); } } #location 13 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Test public void testVertexTransactionalCache2() { Vertex v1 = this.sqlgGraph.addVertex(T.label, "Person"); Vertex v2 = this.sqlgGraph.addVertex(T.label, "Person"); Vertex v3 = this.sqlgGraph.addVertex(T.label, "Person"); Edge e1 = v1.addEdge("friend", v2); Assert.assertEquals(1, vertexTraversal(v1).out("friend").count().next().intValue()); Vertex tmpV1 = edgeTraversal(this.sqlgGraph.traversal().E(e1.id()).next()).outV().next(); tmpV1.addEdge("foe", v3); //this should fail as v1's out edges will not be updated Assert.assertEquals(1, vertexTraversal(tmpV1).out("foe").count().next().intValue()); Assert.assertEquals(1, vertexTraversal(v1).out("foe").count().next().intValue()); this.sqlgGraph.tx().rollback(); }
#vulnerable code @Test public void testVertexTransactionalCache2() { Vertex v1 = this.sqlgGraph.addVertex(T.label, "Person"); Vertex v2 = this.sqlgGraph.addVertex(T.label, "Person"); Vertex v3 = this.sqlgGraph.addVertex(T.label, "Person"); Edge e1 = v1.addEdge("friend", v2); Assert.assertEquals(1, vertexTraversal(v1).out("friend").count().next().intValue()); Vertex tmpV1 = edgeTraversal(this.sqlgGraph.e(e1.id())).outV().next(); tmpV1.addEdge("foe", v3); //this should fail as v1's out edges will not be updated Assert.assertEquals(1, vertexTraversal(tmpV1).out("foe").count().next().intValue()); Assert.assertEquals(1, vertexTraversal(v1).out("foe").count().next().intValue()); this.sqlgGraph.tx().rollback(); } #location 9 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Test public void testBatchUpdateDifferentPropertiesDifferentRows() { Vertex sqlgVertex1 = this.sqlgGraph.addVertex(T.label, "Person", "property1", "a1", "property2", "b1", "property3", "c1"); Vertex sqlgVertex2 = this.sqlgGraph.addVertex(T.label, "Person", "property1", "a2", "property2", "b2", "property3", "c2"); Vertex sqlgVertex3 = this.sqlgGraph.addVertex(T.label, "Person", "property1", "a3", "property2", "b3", "property3", "c3"); this.sqlgGraph.tx().commit(); sqlgVertex1 = this.sqlgGraph.traversal().V(sqlgVertex1.id()).next(); assertEquals("a1", sqlgVertex1.value("property1")); assertEquals("b1", sqlgVertex1.value("property2")); assertEquals("c1", sqlgVertex1.value("property3")); this.sqlgGraph.tx().rollback(); this.sqlgGraph.tx().normalBatchModeOn(); sqlgVertex1 = this.sqlgGraph.traversal().V(sqlgVertex1.id()).next(); sqlgVertex1.property("property1", "a11"); sqlgVertex2.property("property2", "b22"); sqlgVertex3.property("property3", "c33"); this.sqlgGraph.tx().commit(); assertEquals("a11", sqlgVertex1.value("property1")); assertEquals("b1", sqlgVertex1.value("property2")); assertEquals("c1", sqlgVertex1.value("property3")); sqlgVertex1 = this.sqlgGraph.traversal().V(sqlgVertex1.id()).next(); assertEquals("a11", sqlgVertex1.value("property1")); assertEquals("b1", sqlgVertex1.value("property2")); assertEquals("c1", sqlgVertex1.value("property3")); sqlgVertex2 = this.sqlgGraph.traversal().V(sqlgVertex2.id()).next(); assertEquals("a2", sqlgVertex2.value("property1")); assertEquals("b22", sqlgVertex2.value("property2")); assertEquals("c2", sqlgVertex2.value("property3")); sqlgVertex3 = this.sqlgGraph.traversal().V(sqlgVertex3.id()).next(); assertEquals("a3", sqlgVertex3.value("property1")); assertEquals("b3", sqlgVertex3.value("property2")); assertEquals("c33", sqlgVertex3.value("property3")); }
#vulnerable code @Test public void testBatchUpdateDifferentPropertiesDifferentRows() { Vertex sqlgVertex1 = this.sqlgGraph.addVertex(T.label, "Person", "property1", "a1", "property2", "b1", "property3", "c1"); Vertex sqlgVertex2 = this.sqlgGraph.addVertex(T.label, "Person", "property1", "a2", "property2", "b2", "property3", "c2"); Vertex sqlgVertex3 = this.sqlgGraph.addVertex(T.label, "Person", "property1", "a3", "property2", "b3", "property3", "c3"); this.sqlgGraph.tx().commit(); sqlgVertex1 = this.sqlgGraph.v(sqlgVertex1.id()); assertEquals("a1", sqlgVertex1.value("property1")); assertEquals("b1", sqlgVertex1.value("property2")); assertEquals("c1", sqlgVertex1.value("property3")); this.sqlgGraph.tx().rollback(); this.sqlgGraph.tx().normalBatchModeOn(); sqlgVertex1 = this.sqlgGraph.v(sqlgVertex1.id()); sqlgVertex1.property("property1", "a11"); sqlgVertex2.property("property2", "b22"); sqlgVertex3.property("property3", "c33"); this.sqlgGraph.tx().commit(); assertEquals("a11", sqlgVertex1.value("property1")); assertEquals("b1", sqlgVertex1.value("property2")); assertEquals("c1", sqlgVertex1.value("property3")); sqlgVertex1 = this.sqlgGraph.v(sqlgVertex1.id()); assertEquals("a11", sqlgVertex1.value("property1")); assertEquals("b1", sqlgVertex1.value("property2")); assertEquals("c1", sqlgVertex1.value("property3")); sqlgVertex2 = this.sqlgGraph.v(sqlgVertex2.id()); assertEquals("a2", sqlgVertex2.value("property1")); assertEquals("b22", sqlgVertex2.value("property2")); assertEquals("c2", sqlgVertex2.value("property3")); sqlgVertex3 = this.sqlgGraph.v(sqlgVertex3.id()); assertEquals("a3", sqlgVertex3.value("property1")); assertEquals("b3", sqlgVertex3.value("property2")); assertEquals("c33", sqlgVertex3.value("property3")); } #location 10 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Test public void testLoadingDatasourceFromJndi() throws Exception { SqlgGraph g = SqlgGraph.open(configuration); assertNotNull(g.getSqlDialect()); assertEquals(configuration.getString("jdbc.url"), g.getJdbcUrl()); assertNotNull(g.getConnection()); }
#vulnerable code @Test public void testLoadingDatasourceFromJndi() throws Exception { SqlgGraph g = SqlgGraph.open(configuration); assertNotNull(g.getSqlDialect()); assertNotNull(g.getSqlgDataSource().get(configuration.getString("jdbc.url"))); } #location 5 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Test public void testLoadPropertiesOnUpdate() { Vertex vertex = this.sqlgGraph.addVertex(T.label, "Person", "property1", "a", "property2", "b"); this.sqlgGraph.tx().commit(); vertex = this.sqlgGraph.traversal().V(vertex.id()).next(); vertex.property("property1", "aa"); assertEquals("b", vertex.value("property2")); }
#vulnerable code @Test public void testLoadPropertiesOnUpdate() { Vertex vertex = this.sqlgGraph.addVertex(T.label, "Person", "property1", "a", "property2", "b"); this.sqlgGraph.tx().commit(); vertex = this.sqlgGraph.v(vertex.id()); vertex.property("property1", "aa"); assertEquals("b", vertex.value("property2")); } #location 7 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Override public Map<String, Set<IndexRef>> extractIndices(Connection conn, String catalog, String schema) throws SQLException{ // copied and simplified from the postgres JDBC driver class (PgDatabaseMetaData) String sql = "SELECT NULL AS TABLE_CAT, n.nspname AS TABLE_SCHEM, " + " ct.relname AS TABLE_NAME, NOT i.indisunique AS NON_UNIQUE, " + " NULL AS INDEX_QUALIFIER, ci.relname AS INDEX_NAME, " + " CASE i.indisclustered " + " WHEN true THEN " + java.sql.DatabaseMetaData.tableIndexClustered + " ELSE CASE am.amname " + " WHEN 'hash' THEN " + java.sql.DatabaseMetaData.tableIndexHashed + " ELSE " + java.sql.DatabaseMetaData.tableIndexOther + " END " + " END AS TYPE, " + " (i.keys).n AS ORDINAL_POSITION, " + " trim(both '\"' from pg_catalog.pg_get_indexdef(ci.oid, (i.keys).n, false)) AS COLUMN_NAME " + "FROM pg_catalog.pg_class ct " + " JOIN pg_catalog.pg_namespace n ON (ct.relnamespace = n.oid) " + " JOIN (SELECT i.indexrelid, i.indrelid, i.indoption, " + " i.indisunique, i.indisclustered, i.indpred, " + " i.indexprs, " + " information_schema._pg_expandarray(i.indkey) AS keys " + " FROM pg_catalog.pg_index i) i " + " ON (ct.oid = i.indrelid) " + " JOIN pg_catalog.pg_class ci ON (ci.oid = i.indexrelid) " + " JOIN pg_catalog.pg_am am ON (ci.relam = am.oid) " + "WHERE true "; if (schema != null && !"".equals(schema)) { sql += " AND n.nspname = " + maybeWrapInQoutes(schema); } else { // exclude schemas we know we're not interested in sql += " AND n.nspname <> 'pg_catalog' AND n.nspname <> 'pg_toast' AND n.nspname <> '"+SQLG_SCHEMA+"'"; } sql += " ORDER BY NON_UNIQUE, TYPE, INDEX_NAME, ORDINAL_POSITION "; try (Statement s=conn.createStatement()){ try (ResultSet indexRs=s.executeQuery(sql)){ Map<String, Set<IndexRef>> ret=new HashMap<>(); String lastKey=null; String lastIndexName=null; IndexType lastIndexType=null; List<String> lastColumns=new LinkedList<>(); while (indexRs.next()){ String cat=indexRs.getString("TABLE_CAT"); String sch=indexRs.getString("TABLE_SCHEM"); String tbl=indexRs.getString("TABLE_NAME"); String key=cat+"."+sch+"."+tbl; String indexName=indexRs.getString("INDEX_NAME"); boolean nonUnique=indexRs.getBoolean("NON_UNIQUE"); if (lastIndexName==null){ lastIndexName=indexName; lastIndexType=nonUnique?IndexType.NON_UNIQUE:IndexType.UNIQUE; lastKey=key; } else if (!lastIndexName.equals(indexName)){ if (!lastIndexName.endsWith("_pkey") && !lastIndexName.endsWith("_idx")){ if (!Schema.GLOBAL_UNIQUE_INDEX_SCHEMA.equals(schema)){ //System.out.println(lastColumns); //TopologyManager.addGlobalUniqueIndex(sqlgGraph,lastIndexName,lastColumns); //} else { MultiMap.put(ret, lastKey, new IndexRef(lastIndexName,lastIndexType,lastColumns)); } } lastColumns.clear(); lastIndexName=indexName; lastIndexType=nonUnique?IndexType.NON_UNIQUE:IndexType.UNIQUE; } lastColumns.add(indexRs.getString("COLUMN_NAME")); lastKey=key; } if (lastIndexName!=null && !lastIndexName.endsWith("_pkey") && !lastIndexName.endsWith("_idx")){ if (!Schema.GLOBAL_UNIQUE_INDEX_SCHEMA.equals(schema)){ //System.out.println(lastColumns); //TopologyManager.addGlobalUniqueIndex(sqlgGraph,lastIndexName,lastColumns); //} else { MultiMap.put(ret, lastKey, new IndexRef(lastIndexName,lastIndexType,lastColumns)); } } return ret; } } }
#vulnerable code @Override public Map<String, Set<IndexRef>> extractIndices(Connection conn, String catalog, String schema) throws SQLException{ // copied and simplified from the postgres JDBC driver class (PgDatabaseMetaData) String sql = "SELECT NULL AS TABLE_CAT, n.nspname AS TABLE_SCHEM, " + " ct.relname AS TABLE_NAME, NOT i.indisunique AS NON_UNIQUE, " + " NULL AS INDEX_QUALIFIER, ci.relname AS INDEX_NAME, " + " CASE i.indisclustered " + " WHEN true THEN " + java.sql.DatabaseMetaData.tableIndexClustered + " ELSE CASE am.amname " + " WHEN 'hash' THEN " + java.sql.DatabaseMetaData.tableIndexHashed + " ELSE " + java.sql.DatabaseMetaData.tableIndexOther + " END " + " END AS TYPE, " + " (i.keys).n AS ORDINAL_POSITION, " + " trim(both '\"' from pg_catalog.pg_get_indexdef(ci.oid, (i.keys).n, false)) AS COLUMN_NAME " + "FROM pg_catalog.pg_class ct " + " JOIN pg_catalog.pg_namespace n ON (ct.relnamespace = n.oid) " + " JOIN (SELECT i.indexrelid, i.indrelid, i.indoption, " + " i.indisunique, i.indisclustered, i.indpred, " + " i.indexprs, " + " information_schema._pg_expandarray(i.indkey) AS keys " + " FROM pg_catalog.pg_index i) i " + " ON (ct.oid = i.indrelid) " + " JOIN pg_catalog.pg_class ci ON (ci.oid = i.indexrelid) " + " JOIN pg_catalog.pg_am am ON (ci.relam = am.oid) " + "WHERE true "; if (schema != null && !"".equals(schema)) { sql += " AND n.nspname = " + maybeWrapInQoutes(schema); } sql += " ORDER BY NON_UNIQUE, TYPE, INDEX_NAME, ORDINAL_POSITION "; try (Statement s=conn.createStatement()){ try (ResultSet indexRs=s.executeQuery(sql)){ Map<String, Set<IndexRef>> ret=new HashMap<>(); String lastKey=null; String lastIndexName=null; IndexType lastIndexType=null; List<String> lastColumns=new LinkedList<>(); while (indexRs.next()){ String cat=indexRs.getString("TABLE_CAT"); String sch=indexRs.getString("TABLE_SCHEM"); String tbl=indexRs.getString("TABLE_NAME"); String key=cat+"."+sch+"."+tbl; String indexName=indexRs.getString("INDEX_NAME"); boolean nonUnique=indexRs.getBoolean("NON_UNIQUE"); if (lastIndexName==null){ lastIndexName=indexName; lastIndexType=nonUnique?IndexType.NON_UNIQUE:IndexType.UNIQUE; lastKey=key; } else if (!lastIndexName.equals(indexName)){ if (!lastIndexName.endsWith("_pkey") && !lastIndexName.endsWith("_idx")){ if (!Schema.GLOBAL_UNIQUE_INDEX_SCHEMA.equals(schema)){ //System.out.println(lastColumns); //TopologyManager.addGlobalUniqueIndex(sqlgGraph,lastIndexName,lastColumns); //} else { MultiMap.put(ret, lastKey, new IndexRef(lastIndexName,lastIndexType,lastColumns)); } } lastColumns.clear(); lastIndexName=indexName; lastIndexType=nonUnique?IndexType.NON_UNIQUE:IndexType.UNIQUE; } lastColumns.add(indexRs.getString("COLUMN_NAME")); lastKey=key; } if (!lastIndexName.endsWith("_pkey") && !lastIndexName.endsWith("_idx")){ if (!Schema.GLOBAL_UNIQUE_INDEX_SCHEMA.equals(schema)){ //System.out.println(lastColumns); //TopologyManager.addGlobalUniqueIndex(sqlgGraph,lastIndexName,lastColumns); //} else { MultiMap.put(ret, lastKey, new IndexRef(lastIndexName,lastIndexType,lastColumns)); } } return ret; } } } #location 53 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Test public void testMultipleReferencesToSameVertex() { Vertex v1 = this.sqlgGraph.addVertex(T.label, "Person", "name", "john"); this.sqlgGraph.tx().commit(); Assert.assertEquals("john", v1.value("name")); //_v1 is in the transaction cache //v1 is not Vertex _v1 = this.sqlgGraph.traversal().V(v1.id()).next(); Assert.assertEquals("john", _v1.value("name")); v1.property("name", "john1"); Assert.assertEquals("john1", v1.value("name")); Assert.assertEquals("john1", _v1.value("name")); }
#vulnerable code @Test public void testMultipleReferencesToSameVertex() { Vertex v1 = this.sqlgGraph.addVertex(T.label, "Person", "name", "john"); this.sqlgGraph.tx().commit(); Assert.assertEquals("john", v1.value("name")); //_v1 is in the transaction cache //v1 is not Vertex _v1 = this.sqlgGraph.v(v1.id()); Assert.assertEquals("john", _v1.value("name")); v1.property("name", "john1"); Assert.assertEquals("john1", v1.value("name")); Assert.assertEquals("john1", _v1.value("name")); } #location 9 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @BeforeClass public static void beforeClass() throws Exception { URL sqlProperties = Thread.currentThread().getContextClassLoader().getResource("sqlg.properties"); configuration = new PropertiesConfiguration(sqlProperties); if (!configuration.containsKey("jdbc.url")) { throw new IllegalArgumentException(String.format("SqlGraph configuration requires that the %s be set", "jdbc.url")); } ds = C3P0DataSource.create(configuration).getDatasource(); //change the connection url to be a JNDI one configuration.setProperty("jdbc.url", "jndi:testConnection"); //set up the initial context NamingManager.setInitialContextFactoryBuilder(environment -> { InitialContextFactory mockFactory = mock(InitialContextFactory.class); Context mockContext = mock(Context.class); when(mockFactory.getInitialContext(any())).thenReturn(mockContext); when(mockContext.lookup("testConnection")).thenReturn(ds); return mockFactory; }); }
#vulnerable code @BeforeClass public static void beforeClass() throws Exception { URL sqlProperties = Thread.currentThread().getContextClassLoader().getResource("sqlg.properties"); configuration = new PropertiesConfiguration(sqlProperties); if (!configuration.containsKey("jdbc.url")) { throw new IllegalArgumentException(String.format("SqlGraph configuration requires that the %s be set", "jdbc.url")); } String url = configuration.getString("jdbc.url"); //obtain the connection that we will later supply from JNDI SqlgPlugin p = findSqlgPlugin(url); Assert.assertNotNull(p); ds = new C3p0DataSourceFactory().setup(p.getDriverFor(url), configuration).getDatasource(); //change the connection url to be a JNDI one configuration.setProperty("jdbc.url", "jndi:testConnection"); //set up the initial context NamingManager.setInitialContextFactoryBuilder(environment -> { InitialContextFactory mockFactory = mock(InitialContextFactory.class); Context mockContext = mock(Context.class); when(mockFactory.getInitialContext(any())).thenReturn(mockContext); when(mockContext.lookup("testConnection")).thenReturn(ds); return mockFactory; }); } #location 13 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @BeforeClass public static void beforeClass() throws Exception { URL sqlProperties = Thread.currentThread().getContextClassLoader().getResource("sqlg.properties"); configuration = new PropertiesConfiguration(sqlProperties); if (!configuration.containsKey("jdbc.url")) { throw new IllegalArgumentException(String.format("SqlGraph configuration requires that the %s be set", "jdbc.url")); } String url = configuration.getString("jdbc.url"); //obtain the connection that we will later supply from JNDI ds = new C3p0DataSourceFactory().setup(url, configuration).getDatasource(); //change the connection url to be a JNDI one configuration.setProperty("jdbc.url", "jndi:testConnection"); //set up the initial context NamingManager.setInitialContextFactoryBuilder(environment -> { InitialContextFactory mockFactory = mock(InitialContextFactory.class); Context mockContext = mock(Context.class); when(mockFactory.getInitialContext(any())).thenReturn(mockContext); when(mockContext.lookup("testConnection")).thenReturn(ds); return mockFactory; }); }
#vulnerable code @BeforeClass public static void beforeClass() throws ClassNotFoundException, IOException, PropertyVetoException, NamingException, ConfigurationException { URL sqlProperties = Thread.currentThread().getContextClassLoader().getResource("sqlg.properties"); configuration = new PropertiesConfiguration(sqlProperties); if (!configuration.containsKey("jdbc.url")) { throw new IllegalArgumentException(String.format("SqlGraph configuration requires that the %s be set", "jdbc.url")); } String url = configuration.getString("jdbc.url"); //obtain the connection that we will later supply from JNDI SqlgGraph g = SqlgGraph.open(configuration); ds = g.getSqlgDataSource().get(url); // g.getTopology().close(); //change the connection url to be a JNDI one configuration.setProperty("jdbc.url", "jndi:testConnection"); //set up the initial context NamingManager.setInitialContextFactoryBuilder(environment -> { InitialContextFactory mockFactory = mock(InitialContextFactory.class); Context mockContext = mock(Context.class); when(mockFactory.getInitialContext(any())).thenReturn(mockContext); when(mockContext.lookup("testConnection")).thenReturn(ds); return mockFactory; }); } #location 13 #vulnerability type RESOURCE_LEAK
Below is the vulnerable code, please generate the patch based on the following information.
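Note: the vulnerable setup above opens a whole SqlgGraph just to borrow its DataSource and never closes it, which is what Infer reports as a RESOURCE_LEAK; the patch builds the DataSource directly. When a heavyweight handle really is needed briefly, try-with-resources bounds its lifetime. A sketch with a hypothetical Handle class standing in for the graph:

public class ScopedHandleSketch {
    // Stand-in for a heavyweight handle such as a graph or client.
    static class Handle implements AutoCloseable {
        String readSetting() { return "jdbc:hypothetical:url"; }
        @Override public void close() { System.out.println("closed"); }
    }

    public static void main(String[] args) {
        // Vulnerable shape: Handle h = new Handle(); h.readSetting(); (never closed)
        // Patched shape: the handle's lifetime is bounded by the block.
        String setting;
        try (Handle h = new Handle()) {
            setting = h.readSetting();
        }
        System.out.println(setting);
    }
}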
#fixed code @Test public void testBatchUpdatePersistentVerticesAllTypes() { Assume.assumeTrue(this.sqlgGraph.features().vertex().properties().supportsFloatValues()); Vertex v1 = this.sqlgGraph.addVertex(T.label, "Person", "name", "a"); Vertex v2 = this.sqlgGraph.addVertex(T.label, "Person", "surname", "b"); this.sqlgGraph.tx().commit(); assertEquals("a", this.sqlgGraph.traversal().V(v1.id()).next().value("name")); assertEquals("b", this.sqlgGraph.traversal().V(v2.id()).next().value("surname")); this.sqlgGraph.tx().rollback(); this.sqlgGraph.tx().normalBatchModeOn(); v1.property("name", "aa"); v1.property("boolean", true); v1.property("short", (short) 1); v1.property("integer", 1); v1.property("long", 1L); v1.property("float", 1F); v1.property("double", 1D); v2.property("surname", "bb"); v2.property("boolean", false); v2.property("short", (short) 2); v2.property("integer", 2); v2.property("long", 2L); v2.property("float", 2F); v2.property("double", 2D); this.sqlgGraph.tx().commit(); assertEquals("aa", this.sqlgGraph.traversal().V(v1.id()).next().value("name")); assertEquals(true, this.sqlgGraph.traversal().V(v1.id()).next().value("boolean")); assertEquals((short) 1, this.sqlgGraph.traversal().V(v1.id()).next().<Short>value("short").shortValue()); assertEquals(1, this.sqlgGraph.traversal().V(v1.id()).next().<Integer>value("integer").intValue()); assertEquals(1L, this.sqlgGraph.traversal().V(v1.id()).next().<Long>value("long").longValue(), 0); assertEquals(1F, this.sqlgGraph.traversal().V(v1.id()).next().<Float>value("float").floatValue(), 0); assertEquals(1D, this.sqlgGraph.traversal().V(v1.id()).next().<Double>value("double").doubleValue(), 0); assertEquals("bb", this.sqlgGraph.traversal().V(v2.id()).next().value("surname")); assertEquals(false, this.sqlgGraph.traversal().V(v2.id()).next().value("boolean")); assertEquals((short) 2, this.sqlgGraph.traversal().V(v2.id()).next().<Short>value("short").shortValue()); assertEquals(2, this.sqlgGraph.traversal().V(v2.id()).next().<Integer>value("integer").intValue()); assertEquals(2L, this.sqlgGraph.traversal().V(v2.id()).next().<Long>value("long").longValue(), 0); assertEquals(2F, this.sqlgGraph.traversal().V(v2.id()).next().<Float>value("float").floatValue(), 0); assertEquals(2D, this.sqlgGraph.traversal().V(v2.id()).next().<Double>value("double").doubleValue(), 0); }
#vulnerable code @Test public void testBatchUpdatePersistentVerticesAllTypes() { Assume.assumeTrue(this.sqlgGraph.features().vertex().properties().supportsFloatValues()); Vertex v1 = this.sqlgGraph.addVertex(T.label, "Person", "name", "a"); Vertex v2 = this.sqlgGraph.addVertex(T.label, "Person", "surname", "b"); this.sqlgGraph.tx().commit(); assertEquals("a", this.sqlgGraph.v(v1.id()).value("name")); assertEquals("b", this.sqlgGraph.v(v2.id()).value("surname")); this.sqlgGraph.tx().rollback(); this.sqlgGraph.tx().normalBatchModeOn(); v1.property("name", "aa"); v1.property("boolean", true); v1.property("short", (short) 1); v1.property("integer", 1); v1.property("long", 1L); v1.property("float", 1F); v1.property("double", 1D); v2.property("surname", "bb"); v2.property("boolean", false); v2.property("short", (short) 2); v2.property("integer", 2); v2.property("long", 2L); v2.property("float", 2F); v2.property("double", 2D); this.sqlgGraph.tx().commit(); assertEquals("aa", this.sqlgGraph.v(v1.id()).value("name")); assertEquals(true, this.sqlgGraph.v(v1.id()).value("boolean")); assertEquals((short) 1, this.sqlgGraph.v(v1.id()).<Short>value("short").shortValue()); assertEquals(1, this.sqlgGraph.v(v1.id()).<Integer>value("integer").intValue()); assertEquals(1L, this.sqlgGraph.v(v1.id()).<Long>value("long").longValue(), 0); assertEquals(1F, this.sqlgGraph.v(v1.id()).<Float>value("float").floatValue(), 0); assertEquals(1D, this.sqlgGraph.v(v1.id()).<Double>value("double").doubleValue(), 0); assertEquals("bb", this.sqlgGraph.v(v2.id()).value("surname")); assertEquals(false, this.sqlgGraph.v(v2.id()).value("boolean")); assertEquals((short) 2, this.sqlgGraph.v(v2.id()).<Short>value("short").shortValue()); assertEquals(2, this.sqlgGraph.v(v2.id()).<Integer>value("integer").intValue()); assertEquals(2L, this.sqlgGraph.v(v2.id()).<Long>value("long").longValue(), 0); assertEquals(2F, this.sqlgGraph.v(v2.id()).<Float>value("float").floatValue(), 0); assertEquals(2D, this.sqlgGraph.v(v2.id()).<Double>value("double").doubleValue(), 0); } #location 10 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Test public void testLoadingLocalDateTime() throws Exception { Vertex v = this.sqlgGraph.addVertex(T.label, "Person", "createOn", LocalDateTime.now()); this.sqlgGraph.tx().commit(); this.sqlgGraph.close(); //noinspection Duplicates try (SqlgGraph sqlgGraph1 = SqlgGraph.open(configuration)) { Vertex vv = sqlgGraph1.traversal().V(v.id()).next(); Assert.assertTrue(vv.property("createOn").isPresent()); Map<String, PropertyType> propertyTypeMap = sqlgGraph1.getTopology().getAllTables().get(SchemaTable.of( sqlgGraph1.getSqlDialect().getPublicSchema(), "V_Person").toString()); Assert.assertTrue(propertyTypeMap.containsKey("createOn")); sqlgGraph1.tx().rollback(); } }
#vulnerable code @Test public void testLoadingLocalDateTime() throws Exception { Vertex v = this.sqlgGraph.addVertex(T.label, "Person", "createOn", LocalDateTime.now()); this.sqlgGraph.tx().commit(); this.sqlgGraph.close(); //noinspection Duplicates try (SqlgGraph sqlgGraph1 = SqlgGraph.open(configuration)) { Vertex vv = sqlgGraph1.traversal().V(v.id()).next(); assertTrue(vv.property("createOn").isPresent()); Map<String, PropertyType> propertyTypeMap = sqlgGraph1.getTopology().getAllTables().get(SchemaTable.of( sqlgGraph1.getSqlDialect().getPublicSchema(), "V_Person").toString()); assertTrue(propertyTypeMap.containsKey("createOn")); sqlgGraph1.tx().rollback(); } } #location 10 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Test public void testVertexTransactionalCache() { Vertex v1 = this.sqlgGraph.addVertex(T.label, "Person"); Vertex v2 = this.sqlgGraph.addVertex(T.label, "Person"); Vertex v3 = this.sqlgGraph.addVertex(T.label, "Person"); v1.addEdge("friend", v2); Assert.assertEquals(1, vertexTraversal(v1).out("friend").count().next().intValue()); Vertex tmpV1 = this.sqlgGraph.traversal().V(v1.id()).next(); tmpV1.addEdge("foe", v3); //this should fail as v1's out edges will not be updated Assert.assertEquals(1, vertexTraversal(tmpV1).out("foe").count().next().intValue()); Assert.assertEquals(1, vertexTraversal(v1).out("foe").count().next().intValue()); this.sqlgGraph.tx().rollback(); }
#vulnerable code @Test public void testVertexTransactionalCache() { Vertex v1 = this.sqlgGraph.addVertex(T.label, "Person"); Vertex v2 = this.sqlgGraph.addVertex(T.label, "Person"); Vertex v3 = this.sqlgGraph.addVertex(T.label, "Person"); v1.addEdge("friend", v2); Assert.assertEquals(1, vertexTraversal(v1).out("friend").count().next().intValue()); Vertex tmpV1 = this.sqlgGraph.v(v1.id()); tmpV1.addEdge("foe", v3); //this should fail as v1's out edges will not be updated Assert.assertEquals(1, vertexTraversal(tmpV1).out("foe").count().next().intValue()); Assert.assertEquals(1, vertexTraversal(v1).out("foe").count().next().intValue()); this.sqlgGraph.tx().rollback(); } #location 9 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Test public void testCreateEdgeBetweenVertices() { Vertex person1 = this.sqlgGraph.addVertex(T.label, "Person"); Vertex person2 = this.sqlgGraph.addVertex(T.label, "Person"); this.sqlgGraph.tx().commit(); person1 = this.sqlgGraph.traversal().V(person1.id()).next(); person2 = this.sqlgGraph.traversal().V(person2.id()).next(); person1.addEdge("friend", person2); this.sqlgGraph.tx().commit(); Assert.assertEquals(1, vertexTraversal(person1).out("friend").count().next().intValue()); Assert.assertEquals(1, vertexTraversal(person2).in("friend").count().next().intValue()); }
#vulnerable code @Test public void testCreateEdgeBetweenVertices() { Vertex person1 = this.sqlgGraph.addVertex(T.label, "Person"); Vertex person2 = this.sqlgGraph.addVertex(T.label, "Person"); this.sqlgGraph.tx().commit(); person1 = this.sqlgGraph.v(person1.id()); person2 = this.sqlgGraph.v(person2.id()); person1.addEdge("friend", person2); this.sqlgGraph.tx().commit(); Assert.assertEquals(1, vertexTraversal(person1).out("friend").count().next().intValue()); Assert.assertEquals(1, vertexTraversal(person2).in("friend").count().next().intValue()); } #location 8 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Test public void testPropertiesNotBeingCachedOnVertexOut() { Vertex v1 = this.sqlgGraph.addVertex(T.label, "Person"); Vertex v2 = this.sqlgGraph.addVertex(T.label, "Car", "name", "a"); Vertex v3 = this.sqlgGraph.addVertex(T.label, "Car", "name", "b"); Vertex v4 = this.sqlgGraph.addVertex(T.label, "Car", "name", "c"); v1.addEdge("car", v2); v1.addEdge("car", v3); v1.addEdge("car", v4); this.sqlgGraph.tx().commit(); v1 = this.sqlgGraph.traversal().V(v1.id()).next(); List<Vertex> cars = vertexTraversal(v1).out("car").toList(); Assert.assertEquals(3, cars.size()); }
#vulnerable code @Test public void testPropertiesNotBeingCachedOnVertexOut() { Vertex v1 = this.sqlgGraph.addVertex(T.label, "Person"); Vertex v2 = this.sqlgGraph.addVertex(T.label, "Car", "name", "a"); Vertex v3 = this.sqlgGraph.addVertex(T.label, "Car", "name", "b"); Vertex v4 = this.sqlgGraph.addVertex(T.label, "Car", "name", "c"); v1.addEdge("car", v2); v1.addEdge("car", v3); v1.addEdge("car", v4); this.sqlgGraph.tx().commit(); v1 = this.sqlgGraph.v(v1.id()); List<Vertex> cars = vertexTraversal(v1).out("car").toList(); Assert.assertEquals(3, cars.size()); } #location 16 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Test public void testIdNotLoadedAsProperty() throws Exception { Vertex v = this.sqlgGraph.addVertex(T.label, "Person", "name", "a"); this.sqlgGraph.tx().commit(); this.sqlgGraph.close(); try (SqlgGraph sqlgGraph1 = SqlgGraph.open(configuration)) { Vertex vv = sqlgGraph1.traversal().V(v.id()).next(); Assert.assertFalse(vv.property("ID").isPresent()); Map<String, PropertyType> propertyTypeMap = sqlgGraph1.getTopology().getAllTables().get(SchemaTable.of( sqlgGraph1.getSqlDialect().getPublicSchema(), "V_Person").toString()); Assert.assertFalse(propertyTypeMap.containsKey("ID")); sqlgGraph1.tx().rollback(); } }
#vulnerable code @Test public void testIdNotLoadedAsProperty() throws Exception { Vertex v = this.sqlgGraph.addVertex(T.label, "Person", "name", "a"); this.sqlgGraph.tx().commit(); this.sqlgGraph.close(); try (SqlgGraph sqlgGraph1 = SqlgGraph.open(configuration)) { Vertex vv = sqlgGraph1.traversal().V(v.id()).next(); assertFalse(vv.property("ID").isPresent()); Map<String, PropertyType> propertyTypeMap = sqlgGraph1.getTopology().getAllTables().get(SchemaTable.of( sqlgGraph1.getSqlDialect().getPublicSchema(), "V_Person").toString()); assertFalse(propertyTypeMap.containsKey("ID")); sqlgGraph1.tx().rollback(); } } #location 9 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Test public void testLoadingDatasourceFromJndi() throws Exception { SqlgGraph g = SqlgGraph.open(configuration); assertNotNull(g.getSqlDialect()); assertEquals(configuration.getString("jdbc.url"), g.getJdbcUrl()); assertNotNull(g.getConnection()); }
#vulnerable code @Test public void testLoadingDatasourceFromJndi() throws Exception { SqlgGraph g = SqlgGraph.open(configuration); assertNotNull(g.getSqlDialect()); assertNotNull(g.getSqlgDataSource().get(configuration.getString("jdbc.url"))); } #location 5 #vulnerability type RESOURCE_LEAK
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @POST @Path(value = "test") @Produces(MediaType.APPLICATION_JSON) public Map<String, Object> test(@Context Repository repository, @FormParam(Notifier.JENKINS_BASE) String jenkinsBase, @FormParam(Notifier.CLONE_TYPE) String cloneType, @FormParam(Notifier.CLONE_URL) String cloneUrl, @FormParam(Notifier.IGNORE_CERTS) boolean ignoreCerts, @FormParam(Notifier.OMIT_HASH_CODE) boolean omitHashCode) { if (jenkinsBase == null || cloneType == null || (cloneType.equals("custom") && cloneUrl == null)) { Map<String, Object> map = new HashMap<String, Object>(); map.put("successful", false); map.put("message", "Settings must be configured"); return map; } permissionService.validateForRepository(repository, Permission.REPO_ADMIN); log.debug("Triggering jenkins notification for repository {}/{}", repository.getProject().getKey(), repository.getSlug()); /* @todo [email protected]: Send null instead of master and sha1 and * handle this in notify */ NotificationResult result = notifier.notify(repository, jenkinsBase, ignoreCerts, cloneType, cloneUrl, null, null, omitHashCode, true); log.debug("Got response from jenkins: {}", result); // Shouldn't have to do this but the result isn't being marshalled correctly Map<String, Object> map = new HashMap<String, Object>(); map.put("successful", result.isSuccessful()); map.put("url", result.getUrl()); map.put("message", result.getMessage()); return map; }
#vulnerable code @POST @Path(value = "test") @Produces(MediaType.APPLICATION_JSON) public Map<String, Object> test(@Context Repository repository, @FormParam(Notifier.JENKINS_BASE) String jenkinsBase, @FormParam(Notifier.CLONE_TYPE) String cloneType, @FormParam(Notifier.CLONE_URL) String cloneUrl, @FormParam(Notifier.IGNORE_CERTS) boolean ignoreCerts, @FormParam(Notifier.OMIT_HASH_CODE) boolean omitHashCode) { if (jenkinsBase == null || cloneType == null || (cloneType.equals("custom") && cloneUrl == null)) { Map<String, Object> map = new HashMap<String, Object>(); map.put("successful", false); map.put("message", "Settings must be configured"); return map; } permissionService.validateForRepository(repository, Permission.REPO_ADMIN); log.debug("Triggering jenkins notification for repository {}/{}", repository.getProject().getKey(), repository.getSlug()); /* @todo [email protected]: Send null instead of master and sha1 and * handle this in notify */ NotificationResult result = notifier.notify(repository, jenkinsBase, ignoreCerts, cloneType, cloneUrl, null, null, omitHashCode); log.debug("Got response from jenkins: {}", result); // Shouldn't have to do this but the result isn't being marshalled correctly Map<String, Object> map = new HashMap<String, Object>(); map.put("successful", result.isSuccessful()); map.put("url", result.getUrl()); map.put("message", result.getMessage()); return map; } #location 31 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code public static void main(String[] args) throws Exception { try (MongoClient client = new MongoClient(Connection.URI)) { MongoCollection<Document> eventCollection = client.getDatabase("test").getCollection("events"); long i = 0; while (true) { Document doc = new Document(); doc.put("i", i++); doc.put("even", i % 2); eventCollection.insertOne(doc); //System.out.println("inserted: " + doc); Thread.sleep(2000L + (long)(1000*Math.random())); } } }
#vulnerable code public static void main(String[] args) throws Exception { MongoCollection<Document> eventCollection = new MongoClient( new MongoClientURI("mongodb://localhost:27001,localhost:27002,localhost:27003/test?replicatSet=demo-dev") ).getDatabase("test").getCollection("events"); long i = 0; while (true) { Document doc = new Document(); doc.put("i", i++); doc.put("even", i % 2); eventCollection.insertOne(doc); //System.out.println("inserted: " + doc); Thread.sleep(2000L + (long)(1000*Math.random())); } } #location 6 #vulnerability type RESOURCE_LEAK
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code public static void main(String[] args) throws Exception { try (MongoClient client = new MongoClient(Connection.URI)) { MongoCollection<Document> eventCollection = client.getDatabase("test").getCollection("events"); ChangeStreamIterable<Document> changes = eventCollection.watch(asList( Aggregates.match( and( asList( in("operationType", asList("insert")), eq("fullDocument.even", 1L))) ))); changes.iterator().forEachRemaining( change -> System.out.println("received: " + change.getFullDocument()) ); } }
#vulnerable code public static void main(String[] args) throws Exception { MongoCollection<Document> eventCollection = new MongoClient( new MongoClientURI("mongodb://localhost:27001,localhost:27002,localhost:27003/test?replicatSet=demo-dev") ).getDatabase("test").getCollection("events"); ChangeStreamIterable<Document> changes = eventCollection.watch(asList( Aggregates.match( and( asList( in("operationType", asList("insert")), eq("fullDocument.even", 1L))) ))); changes.forEach(new Block<ChangeStreamDocument<Document>>() { @Override public void apply(ChangeStreamDocument<Document> t) { System.out.println("received: " + t.getFullDocument()); } }); } #location 6 #vulnerability type RESOURCE_LEAK
Below is the vulnerable code, please generate the patch based on the following information.
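Note: both MongoDB pairs move the client into try-with-resources (the same shape as the Handle sketch after the JNDI pair above) and this one additionally replaces the anonymous Block callback with iterator-driven lambda iteration. A tiny sketch of that second change, using a plain list iterator in place of the change stream:

import java.util.Iterator;
import java.util.List;

public class ForEachRemainingSketch {
    public static void main(String[] args) {
        Iterator<String> changes = List.of("insert", "insert").iterator();
        // Same shape as the patched change-stream loop: a lambda replaces
        // the anonymous callback class.
        changes.forEachRemaining(change -> System.out.println("received: " + change));
    }
}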
#fixed code void invertLookupTable() { int nImages = getStackSize(); ip.invertLut(); if (nImages==1) ip.invert(); else { ImageStack stack2 = getStack(); for (int i=1; i<=nImages; i++) stack2.getProcessor(i).invert(); stack2.setColorModel(ip.getColorModel()); } }
#vulnerable code void undoFilter() { if (ip!=null) { ip.reset(); updateAndDraw(); } } #location 4 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code, please generate the patch based on the following information.
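Note: the THREAD_SAFETY_VIOLATION here is a check-then-act race — undoFilter tests the mutable field ip for null and then calls methods on it, but another thread can clear the field in between. The project's actual patch replaced the method entirely; the general defensive shape, sketched below with an illustrative Runnable field, is to snapshot the field into a local before the check:

public class SnapshotSketch {
    private volatile Runnable task; // may be cleared by another thread

    // Vulnerable shape: task could become null between the check and the call.
    void runIfPresentRacy() {
        if (task != null) {
            task.run(); // possible NullPointerException under races
        }
    }

    // Safer shape: snapshot the field once into a local, then use the snapshot.
    void runIfPresent() {
        Runnable snapshot = task;
        if (snapshot != null) {
            snapshot.run();
        }
    }

    public static void main(String[] args) {
        SnapshotSketch s = new SnapshotSketch();
        s.task = () -> System.out.println("ran");
        s.runIfPresent();
    }
}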
#fixed code @Override public void setPixels(Object pixels) { setImagePlanePixels(this.imageData, getPlanePosition(), pixels); }
#vulnerable code @Override public void setPixels(Object pixels) { int[] position = Index.create(0, 0, getPlanePosition()); setImagePlanePixels(this.imageData, position, pixels); } #location 4 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code public AttributeBuilder nodeAttributes(String groupId, String artifactId, String version, String scopes, String effectiveScope) { AbstractNode node = this.scopeStyles.containsKey(effectiveScope) ? this.scopeStyles.get(effectiveScope) : this.defaultNode; return node.createAttributes(groupId, artifactId, version, scopes, node != this.defaultNode); }
#vulnerable code public AttributeBuilder nodeAttributes(String groupId, String artifactId, String version, String scopes, String effectiveScope) { Map<String, ? extends AbstractNode> scopedNodes = getScopedNodes(); AbstractNode node = scopedNodes.containsKey(effectiveScope) ? scopedNodes.get(effectiveScope) : getDefaultNode(); return node.createAttributes(groupId, artifactId, version, scopes, node != this.defaultNode && node != EMPTY_NODE); } #location 4 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code private String downloadNzb(SearchResultEntity result) throws IOException { Request request = new Request.Builder().url(result.getLink()).build(); Indexer indexerByName = searchModuleProvider.getIndexerByName(result.getIndexer().getName()); Integer timeout = indexerByName.getConfig().getTimeout().orElse(configProvider.getBaseConfig().getSearching().getTimeout()); try (Response response = clientHttpRequestFactory.getOkHttpClientBuilder(request.url().uri()).readTimeout(timeout, TimeUnit.SECONDS).connectTimeout(timeout, TimeUnit.SECONDS).build().newCall(request).execute()) { if (!response.isSuccessful()) { throw new IOException("Unsuccessful NZB download from URL " + result.getLink() + ". Message: " + response.message()); } ResponseBody body = response.body(); if (body == null) { throw new IOException("NZB downloaded from " + result.getLink() + " is empty"); } String content = body.string(); /* string() consumes and closes the body, so it must be read exactly once */ if (Strings.isNullOrEmpty(content)) { throw new IOException("NZB downloaded from " + result.getLink() + " is empty"); } return content; } }
#vulnerable code private String downloadNzb(SearchResultEntity result) throws IOException { Request request = new Request.Builder().url(result.getLink()).build(); Indexer indexerByName = searchModuleProvider.getIndexerByName(result.getIndexer().getName()); Integer timeout = indexerByName.getConfig().getTimeout().orElse(configProvider.getBaseConfig().getSearching().getTimeout()); try (Response response = clientHttpRequestFactory.getOkHttpClientBuilder(request.url().uri()).readTimeout(timeout, TimeUnit.SECONDS).connectTimeout(timeout, TimeUnit.SECONDS).build().newCall(request).execute()) { return response.body().string(); } } #location 6 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
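Note: two distinct hazards meet in downloadNzb — OkHttp's response.body() is nullable, and a response body can only be consumed once (string() reads and then closes it). A generic sketch of the safe shape, null-check first and a single read into a local; the Body interface and readOnce method are stand-ins, not OkHttp API:

import java.io.IOException;

public class BodyReadSketch {
    // Stand-in for an HTTP response body that may be null and can only be
    // consumed once (as with OkHttp's ResponseBody).
    interface Body { String readOnce() throws IOException; }

    static String download(Body body, String url) throws IOException {
        if (body == null) {
            throw new IOException("Download from " + url + " returned no body");
        }
        String content = body.readOnce(); // consume exactly once
        if (content == null || content.isEmpty()) {
            throw new IOException("Download from " + url + " is empty");
        }
        return content;
    }

    public static void main(String[] args) throws IOException {
        System.out.println(download(() -> "nzb-content", "http://example.invalid/nzb"));
    }
}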
#fixed code @PreDestroy public void destroy() { boolean isOsWindows = isOsWindows(); if (isOsWindows) { logger.debug("Initiating removal of windows tray icon (if it exists)"); try { WindowsTrayIcon.remove(); } catch (Throwable e) { //An exception might be thrown while shutting down, ignore this } } logger.info("Shutting down"); }
#vulnerable code @PreDestroy public void destroy() { String osName = System.getProperty("os.name"); boolean isOsWindows = osName.toLowerCase().contains("windows"); if (isOsWindows) { logger.debug("Initiating removal of windows tray icon (if it exists)"); try { WindowsTrayIcon.remove(); } catch (Throwable e) { //An exception might be thrown while shutting down, ignore this } } logger.info("Shutting down"); } #location 4 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code public void testExecute() throws Exception { MockWebServer server = getMockWebServer(); HttpUrl url = server.url("/repos/theotherp/nzbhydra2/releases"); //Here the magic happens File pom = getTestFile("/src/test/resources/org/nzbhydra/github/mavenreleaseplugin/pomWithToken.xml"); assertTrue(pom.exists()); ReleaseMojo releaseMojo = new ReleaseMojo(); releaseMojo = (ReleaseMojo) configureMojo(releaseMojo, extractPluginConfiguration("github-release-plugin", pom )); releaseMojo.githubReleasesUrl = url.toString(); releaseMojo.windowsAsset = getTestFile("src/test/resources/org/nzbhydra/github/mavenreleaseplugin/windowsAsset.txt"); releaseMojo.linuxAsset = getTestFile("src/test/resources/org/nzbhydra/github/mavenreleaseplugin/linuxAsset.txt"); releaseMojo.execute(); verifyExecution(server); }
#vulnerable code public void testExecute() throws Exception { MockWebServer server = new MockWebServer(); Release draftReleaseResponse = new Release(); draftReleaseResponse.setUploadUrl(server.url("/repos/theotherp/nzbhydra2/releases/1/assets").toString()); draftReleaseResponse.setUrl(server.url("/repos/theotherp/nzbhydra2/releases/1").toString()); draftReleaseResponse.setDraft(true); ArrayList<Asset> assets = new ArrayList<>(); assets.add(new Asset()); assets.add(new Asset()); draftReleaseResponse.setAssets(assets); Release effectiveReleaseResponse = new Release(); effectiveReleaseResponse.setDraft(false); MockResponse releaseMockResponse = new MockResponse() .setResponseCode(200) .setBody(objectMapper.writeValueAsString(draftReleaseResponse)); server.enqueue(releaseMockResponse); server.enqueue(new MockResponse().setResponseCode(200)); //Windows asset upload server.enqueue(new MockResponse().setResponseCode(200)); //Linux asset upload server.enqueue(new MockResponse().setResponseCode(200).setBody(objectMapper.writeValueAsString(effectiveReleaseResponse))); //Setting the release effective HttpUrl url = server.url("/repos/theotherp/nzbhydra2/releases"); //Here the magic happens executePlugin(url); //Creating the release verifyDraftReleaseIsCreated(server); //Uploading the assets RecordedRequest windowsAssetUploadRequest = server.takeRequest(2, TimeUnit.SECONDS); assertTrue(windowsAssetUploadRequest.getPath(), windowsAssetUploadRequest.getPath().endsWith("releases/1/assets?name=windowsAsset.txt")); RecordedRequest linuxAssetUploadRequest = server.takeRequest(2, TimeUnit.SECONDS); assertTrue(linuxAssetUploadRequest.getPath(), linuxAssetUploadRequest.getPath().endsWith("releases/1/assets?name=linuxAsset.txt")); //Setting it effective RecordedRequest setEffectiveRequest = server.takeRequest(2, TimeUnit.SECONDS); assertTrue(setEffectiveRequest.getPath(), setEffectiveRequest.getPath().endsWith("releases/1")); String body = new String(setEffectiveRequest.getBody().readByteArray()); Release bodyJson = objectMapper.readValue(body, Release.class); assertFalse(bodyJson.isDraft()); } #location 33 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @PostConstruct private void addTrayIconIfApplicable() { boolean isOsWindows = isOsWindows(); if (isOsWindows) { logger.info("Adding windows system tray icon"); try { new WindowsTrayIcon(); } catch (HeadlessException e) { logger.error("Can't add a windows tray icon because running headless"); } } }
#vulnerable code @PostConstruct private void addTrayIconIfApplicable() { String osName = System.getProperty("os.name"); boolean isOsWindows = osName.toLowerCase().contains("windows"); if (isOsWindows) { logger.info("Adding windows system tray icon"); try { new WindowsTrayIcon(); } catch (HeadlessException e) { logger.error("Can't add a windows tray icon because running headless"); } } } #location 4 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
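Note: this pair and the destroy() pair above share the same report — System.getProperty may return null for an absent key, so calling toLowerCase() on the raw result is flagged. In practice the JVM always sets os.name, but the extracted helper satisfies the analyzer and costs nothing. A sketch of the shape the patches imply:

public class OsCheckSketch {
    // System.getProperty can return null when the property is absent, so the
    // null check must come before any method call on the result.
    static boolean isOsWindows() {
        String osName = System.getProperty("os.name");
        return osName != null && osName.toLowerCase().contains("windows");
    }

    public static void main(String[] args) {
        System.out.println("windows: " + isOsWindows());
    }
}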
#fixed code @Test public void shouldSetEnabledOnDownloadEvent() { testee.queueCheckEnabled = false; testee.lastDownload = null; testee.onNzbDownloadEvent(new FileDownloadEvent(new FileDownloadEntity(), new SearchResultEntity())); assertThat(testee.queueCheckEnabled).isTrue(); assertThat(testee.lastDownload).isNotNull(); }
#vulnerable code @Test public void shouldSetEnabledOnDownloadEvent() { testee.queueCheckEnabled = false; testee.lastDownload = null; testee.onNzbDownloadEvent(new FileDownloadEvent(null, null)); assertThat(testee.queueCheckEnabled).isTrue(); assertThat(testee.lastDownload).isNotNull(); } #location 6 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Test public void shouldMigrate() { BaseConfig input = new BaseConfig(); input.getMain().setConfigVersion(1); BaseConfig afterMigration = new BaseConfig(); afterMigration.getMain().setConfigVersion(2); when(configMigrationStepMock.forVersion()).thenReturn(1); when(configMigrationStepMock.migrate(any())).thenReturn(afterMigration); testee.steps = Arrays.asList(configMigrationStepMock); testee.expectedConfigVersion = 2; BaseConfig result = testee.migrate(input); verify(configMigrationStepMock).migrate(input); assertThat(result.getMain().getConfigVersion()).isEqualTo(2); }
#vulnerable code @Test public void shouldMigrate() { HashMap<String, Object> input = new HashMap<>(ImmutableMap.of("main", new HashMap<>(ImmutableMap.of("configVersion", 1)))); HashMap<String, Object> afterMigration = new HashMap<>(ImmutableMap.of("main", new HashMap<>(ImmutableMap.of("configVersion", 2)))); when(configMigrationStepMock.forVersion()).thenReturn(1); when(configMigrationStepMock.migrate(any())).thenReturn(afterMigration); testee.steps = Arrays.asList(configMigrationStepMock); testee.expectedConfigVersion = 2; Map<String, Object> result = testee.migrate(input); verify(configMigrationStepMock).migrate(input); assertThat((int) ((Map<String, Object>) result.get("main")).get("configVersion")).isEqualTo(2); } #location 14 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
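Note: the patched test swaps raw Map<String, Object> plumbing for the typed BaseConfig object, eliminating the unchecked casts and nullable map lookups Infer flags. A sketch contrasting the two shapes; MainConfig here is an illustrative stand-in for the real config classes:

import java.util.HashMap;
import java.util.Map;

public class TypedConfigSketch {
    // Hypothetical typed config, mirroring the BaseConfig/MainConfig shape above.
    static class MainConfig {
        private int configVersion;
        int getConfigVersion() { return configVersion; }
        void setConfigVersion(int v) { configVersion = v; }
    }

    public static void main(String[] args) {
        // Raw-map shape: a missing key or wrong cast surfaces only at runtime.
        Map<String, Object> raw = new HashMap<>();
        Object main = raw.get("main"); // null here
        // ((Map<String, Object>) main).get("configVersion") -> NullPointerException

        // Typed shape: the compiler pins down the structure.
        MainConfig config = new MainConfig();
        config.setConfigVersion(2);
        System.out.println(config.getConfigVersion());
    }
}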
#fixed code @RequestMapping(value = {"/api", "/dognzb/api"}, produces = MediaType.TEXT_XML_VALUE) public ResponseEntity<? extends Object> api(NewznabParameters params, HttpServletRequest request) throws Exception { if (params.getT() == ActionAttribute.CAPS) { //throw new RuntimeException("test"); return new ResponseEntity<Object>(NewznabMockBuilder.getCaps(), HttpStatus.OK); } if (params.getT() == ActionAttribute.GETNFO) { NewznabXmlRoot rssRoot = new NewznabXmlRoot(); rssRoot.getRssChannel().setNewznabResponse(new NewznabXmlResponse(0, 1)); NewznabXmlItem item = new NewznabXmlItem(); item.setDescription("NFO for NZB with ID " + params.getId()); rssRoot.getRssChannel().getItems().add(item); return ResponseEntity.ok(rssRoot); } String itemTitleBase = params.getApikey(); if (params.getQ() != null && params.getQ().contains("groups")) { itemTitleBase = ""; } if (params.getRid() != null && params.getQ() == null) { NewznabXmlRoot rssRoot = NewznabMockBuilder.generateResponse(0, -1, itemTitleBase, false, Collections.emptyList()); logger.info("Returning no results for rid based search without query"); return new ResponseEntity<Object>(rssRoot, HttpStatus.OK); } boolean doGenerateDuplicates = "duplicates".equals(params.getQ()); if (params.getQ() != null && params.getQ().equals("offsettest")) { NewznabXmlRoot rssRoot = new NewznabXmlRoot(); rssRoot.getRssChannel().setNewznabResponse(new NewznabXmlResponse(0, 0)); if (params.getOffset() >= 40) { return new ResponseEntity<Object>(rssRoot, HttpStatus.OK); } int start = params.getOffset() == 0 ? 0 : params.getOffset(); int end = Math.min(start + 10 - 1, 40); rssRoot = NewznabMockBuilder.generateResponse(start, end, "offsetTest", doGenerateDuplicates, Collections.emptyList()); rssRoot.getRssChannel().getNewznabResponse().setTotal(40); return new ResponseEntity<Object>(rssRoot, HttpStatus.OK); } if (params.getQ() != null && params.getQ().equals("offsettest2")) { NewznabMockRequest mockRequest = NewznabMockRequest.builder().numberOfResults(100).titleBase("offsettest").offset(params.getOffset()).titleWords(Collections.emptyList()).total(300).build(); NewznabXmlRoot rssRoot = NewznabMockBuilder.generateResponse(mockRequest); return new ResponseEntity<Object>(rssRoot, HttpStatus.OK); } if (params.getQ() != null && params.getQ().equals("invalidxml")) { String invalidXml = Resources.toString(Resources.getResource(MockNewznab.class, "invalidXml.xml"), Charsets.UTF_8); return new ResponseEntity<Object>(invalidXml, HttpStatus.OK); } if (params.getQ() != null && params.getQ().equals("slash")) { NewznabMockRequest mockRequest = NewznabMockRequest.builder().numberOfResults(100).titleBase("/").offset(params.getOffset()).titleWords(Collections.emptyList()).total(300).build(); NewznabXmlRoot rssRoot = NewznabMockBuilder.generateResponse(mockRequest); return new ResponseEntity<Object>(rssRoot, HttpStatus.OK); } if (params.getQ() != null && params.getQ().equals("actualduplicates")) { NewznabMockRequest mockRequest = NewznabMockRequest.builder().numberOfResults(10).titleBase("actualduplicates").offset(params.getOffset()).titleWords(Collections.emptyList()).total(10).build(); NewznabXmlRoot rssRoot = NewznabMockBuilder.generateResponse(mockRequest); rssRoot.getRssChannel().getItems().forEach(x -> x.setTitle(rssRoot.getRssChannel().getItems().get(0).getTitle())); rssRoot.getRssChannel().getItems().forEach(x -> x.setLink(rssRoot.getRssChannel().getItems().get(0).getLink())); rssRoot.getRssChannel().getItems().forEach(x -> 
x.setRssGuid(rssRoot.getRssChannel().getItems().get(0).getRssGuid())); rssRoot.getRssChannel().getItems().forEach(x -> x.setNewznabAttributes(rssRoot.getRssChannel().getItems().get(0).getNewznabAttributes())); return new ResponseEntity<Object>(rssRoot, HttpStatus.OK); } if (params.getQ() != null && params.getQ().equals("oneresult")) { NewznabMockRequest mockRequest = NewznabMockRequest.builder().numberOfResults(1).titleBase("oneresult").offset(params.getOffset()).titleWords(Collections.emptyList()).total(1).build(); NewznabXmlRoot rssRoot = NewznabMockBuilder.generateResponse(mockRequest); return new ResponseEntity<Object>(rssRoot, HttpStatus.OK); } if (params.getQ() != null && params.getQ().equals("uitest")) { if (params.getApikey().equals("1")) { NewznabXmlItem result1 = RssItemBuilder.builder("indexer1-result1").pubDate(Instant.now().minus(1, ChronoUnit.DAYS)).hasNfo(false).grabs(1).size(mbToBytes(1)).newznabAttributes(new ArrayList<>(Arrays.asList(new NewznabAttribute("category", "5000")))).category("TV").build(); NewznabXmlItem result2 = RssItemBuilder.builder("indexer1-result2").pubDate(Instant.now().minus(2, ChronoUnit.DAYS)).hasNfo(true).grabs(2).size(mbToBytes(2)).newznabAttributes(new ArrayList<>(Arrays.asList(new NewznabAttribute("category", "5040")))).category("TV SD").build(); NewznabXmlItem result3 = RssItemBuilder.builder("indexer1-result3").pubDate(Instant.now().minus(3, ChronoUnit.DAYS)).comments("comments").grabs(3).size(mbToBytes(3)).newznabAttributes(new ArrayList<>(Arrays.asList(new NewznabAttribute("category", "5030")))).category("TV HD").build(); NewznabXmlRoot rssRoot = NewznabMockBuilder.getRssRoot(Arrays.asList(result1, result2, result3), 0, 3); return new ResponseEntity<Object>(rssRoot, HttpStatus.OK); } NewznabXmlItem result4 = RssItemBuilder.builder("indexer2-result1").pubDate(Instant.now().minus(4, ChronoUnit.DAYS)).grabs(4).size(mbToBytes(4)).newznabAttributes(new ArrayList<>(Arrays.asList(new NewznabAttribute("category", "2000")))).category("Movies").build(); NewznabXmlItem result5 = RssItemBuilder.builder("indexer2-result2").pubDate(Instant.now().minus(5, ChronoUnit.DAYS)).grabs(5).size(mbToBytes(5)).newznabAttributes(new ArrayList<>(Arrays.asList(new NewznabAttribute("category", "2040")))).category("Movies HD").build(); NewznabXmlRoot rssRoot = NewznabMockBuilder.getRssRoot(Arrays.asList(result4, result5), 0, 2); return new ResponseEntity<Object>(rssRoot, HttpStatus.OK); } if (params.getQ() != null && params.getQ().equals("dognzbtotaltest") && System.getProperty("nomockdognzb") == null) { if (params.getOffset() >= 300) { NewznabXmlRoot rssRoot = NewznabMockBuilder.generateResponse(0, -1, itemTitleBase, false, Collections.emptyList()); return new ResponseEntity<Object>(rssRoot, HttpStatus.OK); } NewznabMockRequest mockRequest = NewznabMockRequest.builder().numberOfResults(100).titleBase("dognzbtotaltest").offset(params.getOffset()).titleWords(Collections.emptyList()).total(300).build(); NewznabXmlRoot rssRoot = NewznabMockBuilder.generateResponse(mockRequest); rssRoot.getRssChannel().getNewznabResponse().setTotal(100); return new ResponseEntity<Object>(rssRoot, HttpStatus.OK); } if ((params.getQ() != null && params.getQ().equals("noresults")) || (params.getTvdbid() != null && params.getTvdbid().equals("329089"))) { NewznabXmlRoot rssRoot = NewznabMockBuilder.generateResponse(0, -1, itemTitleBase, false, Collections.emptyList()); return new ResponseEntity<Object>(rssRoot, HttpStatus.OK); } if (params.getQ() != null && params.getQ().equals("sleep")) { 
Thread.sleep(new Random().nextInt(5000)); } if (params.getQ() != null && params.getQ().equals("sleep10")) { Thread.sleep(10000); } if (params.getQ() != null && params.getQ().equals("sleepforever")) { Thread.sleep(10000 * 10000); } if (params.getQ() != null && params.getQ().contains("movies")) { NewznabXmlRoot rssRoot = NewznabMockBuilder.generateResponse(0, 100, itemTitleBase, false, Arrays.asList("cam", "ts", "blu-ray 2160p", "web-dl 1080p", "bluray 1080p", "3d bluray")); rssRoot.getRssChannel().getNewznabResponse().setTotal(100); rssRoot.getRssChannel().getItems().forEach(x -> x.getNewznabAttributes().add(new NewznabAttribute("coverurl", "https://i.omgwtfnzbs.me/tvdb/697fdaeb0fb1ac87d4d6af684b20593a/697fdaeb0fb1ac87d4d6af684b20593a.jpg"))); return new ResponseEntity<Object>(rssRoot, HttpStatus.OK); } if ("oneduplicate".equals(params.getQ())) { NewznabMockRequest mockRequest = NewznabMockRequest.builder() .numberOfResults(1) .titleBase(itemTitleBase) .generateOneDuplicate(true) .build(); NewznabXmlRoot rssRoot = NewznabMockBuilder.generateResponse(mockRequest); rssRoot.getRssChannel().getNewznabResponse().setTotal(1); return new ResponseEntity<Object>(rssRoot, HttpStatus.OK); } if ("titlegroup".equals(params.getQ())) { NewznabMockRequest mockRequest = NewznabMockRequest.builder() .numberOfResults(1) .titleBase(itemTitleBase) .generateOneDuplicate(false) .build(); NewznabXmlRoot rssRoot = NewznabMockBuilder.generateResponse(mockRequest); rssRoot.getRssChannel().getNewznabResponse().setTotal(1); rssRoot.getRssChannel().getItems().forEach(x -> x.setTitle("titlegroup")); return new ResponseEntity<Object>(rssRoot, HttpStatus.OK); } if (params.getTmdbid() != null) { if (itemTitleBase.equals("tmdberror") || "capscheckerror".equals(params.getApikey())) { NewznabXmlError rssError = new NewznabXmlError("123", "description"); return new ResponseEntity<Object>(rssError, HttpStatus.OK); } NewznabXmlRoot rssRoot = NewznabMockBuilder.generateResponse(0, 10, "avengers", doGenerateDuplicates, Collections.emptyList()); return new ResponseEntity<Object>(rssRoot, HttpStatus.OK); } if ("error".equals(params.getQ())) { NewznabXmlError rssError = new NewznabXmlError("123", "description"); return new ResponseEntity<Object>(rssError, HttpStatus.OK); } if (params.getImdbid() != null) { NewznabXmlRoot rssRoot = NewznabMockBuilder.generateResponse(0, 10, "avengers", doGenerateDuplicates, Collections.emptyList()); rssRoot.getRssChannel().getNewznabResponse().setTotal(10); return new ResponseEntity<Object>(rssRoot, HttpStatus.OK); } int endIndex; int key = 0; try { key = Integer.valueOf(itemTitleBase); } catch (NumberFormatException e) { endIndex = 0; } if (apikeyToResultCount.containsKey(key)) { endIndex = apikeyToResultCount.get(key); } else { endIndex = 0; } if (responsesPerApikey.containsKey(endIndex)) { return new ResponseEntity<Object>(responsesPerApikey.get(endIndex), HttpStatus.OK); } else { if (params.getOffset() != null && params.getLimit() != null) { endIndex = Math.min(params.getOffset() + params.getLimit(), endIndex); } NewznabXmlRoot rssRoot = NewznabMockBuilder.generateResponse(0, endIndex, itemTitleBase, doGenerateDuplicates, Collections.emptyList()); rssRoot.getRssChannel().getNewznabResponse().setTotal(endIndex); if ("randomage".equalsIgnoreCase(params.getQ())) { for (NewznabXmlItem item : rssRoot.getRssChannel().getItems()) { item.setPubDate(item.getPubDate().minus(random.nextInt(300) * 24, ChronoUnit.HOURS)); } } return new ResponseEntity<Object>(rssRoot, HttpStatus.OK); } }
#vulnerable code @RequestMapping(value = {"/api", "/dognzb/api"}, produces = MediaType.TEXT_XML_VALUE) public ResponseEntity<? extends Object> api(NewznabParameters params, HttpServletRequest request) throws Exception { if (params.getT() == ActionAttribute.CAPS) { //throw new RuntimeException("test"); return new ResponseEntity<Object>(NewznabMockBuilder.getCaps(), HttpStatus.OK); } if (params.getT() == ActionAttribute.GETNFO) { NewznabXmlRoot rssRoot = new NewznabXmlRoot(); rssRoot.getRssChannel().setNewznabResponse(new NewznabXmlResponse(0, 1)); NewznabXmlItem item = new NewznabXmlItem(); item.setDescription("NFO for NZB with ID " + params.getId()); rssRoot.getRssChannel().getItems().add(item); return ResponseEntity.ok(rssRoot); } String itemTitleBase = params.getApikey(); if (params.getQ() != null && params.getQ().contains("groups")) { itemTitleBase = ""; } if (params.getRid() != null && params.getQ() == null) { NewznabXmlRoot rssRoot = NewznabMockBuilder.generateResponse(0, -1, itemTitleBase, false, Collections.emptyList()); logger.info("Returning no results for rid based search without query"); return new ResponseEntity<Object>(rssRoot, HttpStatus.OK); } boolean doGenerateDuplicates = "duplicates".equals(params.getQ()); if (params.getQ() != null && params.getQ().equals("offsettest")) { NewznabXmlRoot rssRoot = new NewznabXmlRoot(); rssRoot.getRssChannel().setNewznabResponse(new NewznabXmlResponse(0, 0)); if (params.getOffset() >= 40) { return new ResponseEntity<Object>(rssRoot, HttpStatus.OK); } int start = params.getOffset() == 0 ? 0 : params.getOffset(); int end = Math.min(start + 10 - 1, 40); rssRoot = NewznabMockBuilder.generateResponse(start, end, "offsetTest", doGenerateDuplicates, Collections.emptyList()); rssRoot.getRssChannel().getNewznabResponse().setTotal(40); return new ResponseEntity<Object>(rssRoot, HttpStatus.OK); } if (params.getQ() != null && params.getQ().equals("offsettest2")) { NewznabMockRequest mockRequest = NewznabMockRequest.builder().numberOfResults(100).titleBase("offsettest").offset(params.getOffset()).titleWords(Collections.emptyList()).total(300).build(); NewznabXmlRoot rssRoot = NewznabMockBuilder.generateResponse(mockRequest); return new ResponseEntity<Object>(rssRoot, HttpStatus.OK); } if (params.getQ() != null && params.getQ().equals("invalidxml")) { String invalidXml = Resources.toString(Resources.getResource(MockNewznab.class, "invalidXml.xml"), Charsets.UTF_8); return new ResponseEntity<Object>(invalidXml, HttpStatus.OK); } if (params.getQ() != null && params.getQ().equals("slash")) { NewznabMockRequest mockRequest = NewznabMockRequest.builder().numberOfResults(100).titleBase("/").offset(params.getOffset()).titleWords(Collections.emptyList()).total(300).build(); NewznabXmlRoot rssRoot = NewznabMockBuilder.generateResponse(mockRequest); return new ResponseEntity<Object>(rssRoot, HttpStatus.OK); } if (params.getQ() != null && params.getQ().equals("actualduplicates")) { NewznabMockRequest mockRequest = NewznabMockRequest.builder().numberOfResults(10).titleBase("actualduplicates").offset(params.getOffset()).titleWords(Collections.emptyList()).total(10).build(); NewznabXmlRoot rssRoot = NewznabMockBuilder.generateResponse(mockRequest); rssRoot.getRssChannel().getItems().forEach(x -> x.setTitle(rssRoot.getRssChannel().getItems().get(0).getTitle())); rssRoot.getRssChannel().getItems().forEach(x -> x.setLink(rssRoot.getRssChannel().getItems().get(0).getLink())); rssRoot.getRssChannel().getItems().forEach(x -> 
x.setRssGuid(rssRoot.getRssChannel().getItems().get(0).getRssGuid())); rssRoot.getRssChannel().getItems().forEach(x -> x.setNewznabAttributes(rssRoot.getRssChannel().getItems().get(0).getNewznabAttributes())); return new ResponseEntity<Object>(rssRoot, HttpStatus.OK); } if (params.getQ() != null && params.getQ().equals("oneresult")) { NewznabMockRequest mockRequest = NewznabMockRequest.builder().numberOfResults(1).titleBase("oneresult").offset(params.getOffset()).titleWords(Collections.emptyList()).total(1).build(); NewznabXmlRoot rssRoot = NewznabMockBuilder.generateResponse(mockRequest); return new ResponseEntity<Object>(rssRoot, HttpStatus.OK); } if (params.getQ() != null && params.getQ().equals("uitest")) { if (params.getApikey().equals("1")) { NewznabXmlItem result1 = RssItemBuilder.builder("indexer1-result1").pubDate(Instant.now().minus(1, ChronoUnit.DAYS)).hasNfo(false).grabs(1).size(mbToBytes(1)).newznabAttributes(new ArrayList<>(Arrays.asList(new NewznabAttribute("category", "5000")))).category("TV").build(); NewznabXmlItem result2 = RssItemBuilder.builder("indexer1-result2").pubDate(Instant.now().minus(2, ChronoUnit.DAYS)).hasNfo(true).grabs(2).size(mbToBytes(2)).newznabAttributes(new ArrayList<>(Arrays.asList(new NewznabAttribute("category", "5040")))).category("TV SD").build(); NewznabXmlItem result3 = RssItemBuilder.builder("indexer1-result3").pubDate(Instant.now().minus(3, ChronoUnit.DAYS)).comments("comments").grabs(3).size(mbToBytes(3)).newznabAttributes(new ArrayList<>(Arrays.asList(new NewznabAttribute("category", "5030")))).category("TV HD").build(); NewznabXmlRoot rssRoot = NewznabMockBuilder.getRssRoot(Arrays.asList(result1, result2, result3), 0, 3); return new ResponseEntity<Object>(rssRoot, HttpStatus.OK); } NewznabXmlItem result4 = RssItemBuilder.builder("indexer2-result1").pubDate(Instant.now().minus(4, ChronoUnit.DAYS)).grabs(4).size(mbToBytes(4)).newznabAttributes(new ArrayList<>(Arrays.asList(new NewznabAttribute("category", "2000")))).category("Movies").build(); NewznabXmlItem result5 = RssItemBuilder.builder("indexer2-result2").pubDate(Instant.now().minus(5, ChronoUnit.DAYS)).grabs(5).size(mbToBytes(5)).newznabAttributes(new ArrayList<>(Arrays.asList(new NewznabAttribute("category", "2040")))).category("Movies HD").build(); NewznabXmlRoot rssRoot = NewznabMockBuilder.getRssRoot(Arrays.asList(result4, result5), 0, 2); return new ResponseEntity<Object>(rssRoot, HttpStatus.OK); } if (params.getQ() != null && params.getQ().equals("dognzbtotaltest") && System.getProperty("nomockdognzb") == null) { if (params.getOffset() >= 300) { NewznabXmlRoot rssRoot = NewznabMockBuilder.generateResponse(0, -1, itemTitleBase, false, Collections.emptyList()); return new ResponseEntity<Object>(rssRoot, HttpStatus.OK); } NewznabMockRequest mockRequest = NewznabMockRequest.builder().numberOfResults(100).titleBase("dognzbtotaltest").offset(params.getOffset()).titleWords(Collections.emptyList()).total(300).build(); NewznabXmlRoot rssRoot = NewznabMockBuilder.generateResponse(mockRequest); rssRoot.getRssChannel().getNewznabResponse().setTotal(100); return new ResponseEntity<Object>(rssRoot, HttpStatus.OK); } if ((params.getQ() != null && params.getQ().equals("noresults")) || (params.getTvdbid() != null && params.getTvdbid().equals("329089"))) { NewznabXmlRoot rssRoot = NewznabMockBuilder.generateResponse(0, -1, itemTitleBase, false, Collections.emptyList()); return new ResponseEntity<Object>(rssRoot, HttpStatus.OK); } if (params.getQ() != null && params.getQ().equals("sleep")) { 
Thread.sleep(new Random().nextInt(5000)); } if (params.getQ() != null && params.getQ().equals("sleep10")) { Thread.sleep(10000); } if (params.getQ() != null && params.getQ().equals("sleepforever")) { Thread.sleep(10000 * 10000); } if (params.getQ() != null && params.getQ().contains("movies")) { NewznabXmlRoot rssRoot = NewznabMockBuilder.generateResponse(0, 100, itemTitleBase, false, Arrays.asList("cam", "ts", "blu-ray 2160p", "web-dl 1080p", "bluray 1080p", "3d bluray")); rssRoot.getRssChannel().getNewznabResponse().setTotal(100); rssRoot.getRssChannel().getItems().forEach(x -> x.getNewznabAttributes().add(new NewznabAttribute("coverurl", "https://i.omgwtfnzbs.me/tvdb/697fdaeb0fb1ac87d4d6af684b20593a/697fdaeb0fb1ac87d4d6af684b20593a.jpg"))); return new ResponseEntity<Object>(rssRoot, HttpStatus.OK); } if ("oneduplicate".equals(params.getQ())) { NewznabMockRequest mockRequest = NewznabMockRequest.builder() .numberOfResults(1) .titleBase(itemTitleBase) .generateOneDuplicate(true) .build(); NewznabXmlRoot rssRoot = NewznabMockBuilder.generateResponse(mockRequest); rssRoot.getRssChannel().getNewznabResponse().setTotal(1); return new ResponseEntity<Object>(rssRoot, HttpStatus.OK); } if ("titlegroup".equals(params.getQ())) { NewznabMockRequest mockRequest = NewznabMockRequest.builder() .numberOfResults(1) .titleBase(itemTitleBase) .generateOneDuplicate(false) .build(); NewznabXmlRoot rssRoot = NewznabMockBuilder.generateResponse(mockRequest); rssRoot.getRssChannel().getNewznabResponse().setTotal(1); rssRoot.getRssChannel().getItems().forEach(x -> x.setTitle("titlegroup")); return new ResponseEntity<Object>(rssRoot, HttpStatus.OK); } if (params.getTmdbid() != null) { if (itemTitleBase.equals("tmdberror") || "capscheckerror".equals(params.getApikey())) { NewznabXmlError rssError = new NewznabXmlError("123", "description"); return new ResponseEntity<Object>(rssError, HttpStatus.OK); } NewznabXmlRoot rssRoot = NewznabMockBuilder.generateResponse(0, 10, "avengers", doGenerateDuplicates, Collections.emptyList()); return new ResponseEntity<Object>(rssRoot, HttpStatus.OK); } if ("error".equals(params.getQ())) { NewznabXmlError rssError = new NewznabXmlError("123", "description"); return new ResponseEntity<Object>(rssError, HttpStatus.OK); } if (params.getImdbid() != null) { NewznabXmlRoot rssRoot = NewznabMockBuilder.generateResponse(0, 10, "avengers", doGenerateDuplicates, Collections.emptyList()); rssRoot.getRssChannel().getNewznabResponse().setTotal(10); return new ResponseEntity<Object>(rssRoot, HttpStatus.OK); } int endIndex; int key = 0; try { key = Integer.valueOf(itemTitleBase); } catch (NumberFormatException e) { endIndex = 0; } if (apikeyToResultCount.containsKey(key)) { endIndex = apikeyToResultCount.get(key); } else { endIndex = 0; } if (responsesPerApikey.containsKey(endIndex)) { return new ResponseEntity<Object>(responsesPerApikey.get(endIndex), HttpStatus.OK); } else { if (params.getOffset() != null && params.getLimit() != null) { endIndex = Math.min(params.getOffset() + params.getLimit(), endIndex); } NewznabXmlRoot rssRoot = NewznabMockBuilder.generateResponse(0, endIndex, itemTitleBase, doGenerateDuplicates, Collections.emptyList()); rssRoot.getRssChannel().getNewznabResponse().setTotal(endIndex); return new ResponseEntity<Object>(rssRoot, HttpStatus.OK); } } #location 180 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code private void migrateIndexerApiAccesses(Map<Integer, IndexerEntity> oldIdToIndexersMap) throws SQLException { Statement statement = connection.createStatement(); int countIndexerApiAccesses = getCount(statement, "INDEXERAPIACCESS"); logger.info("Migrating {} indexer API accesses from old database", countIndexerApiAccesses); eventPublisher.publishEvent(new MigrationMessageEvent("Migrating " + countIndexerApiAccesses + " indexer API access entries")); ResultSet oldIndexerApiAccesses = statement.executeQuery("SELECT * FROM INDEXERAPIACCESS"); int countMigrated = 1; IndexerApiAccessEntity entity; ProgressLogger progressLogger = new ProgressLogger(logger, 5, TimeUnit.SECONDS); progressLogger.expectedUpdates = countIndexerApiAccesses; progressLogger.start(); while (oldIndexerApiAccesses.next()) { entity = new IndexerApiAccessEntity(); entity.setIndexer(oldIdToIndexersMap.get(oldIndexerApiAccesses.getInt("indexer_id"))); entity.setTime(timestampToInstant(oldIndexerApiAccesses.getString("time"))); Object responseTime = oldIndexerApiAccesses.getObject("response_time"); entity.setResponseTime(responseTime != null ? ((Integer) responseTime).longValue() : null); String error = oldIndexerApiAccesses.getString("error"); entity.setError(error != null ? error.substring(0, Math.min(4000, error.length())) : null); entity.setAccessType(null); entity.setResult(oldIndexerApiAccesses.getBoolean("response_successful") ? IndexerAccessResult.SUCCESSFUL : IndexerAccessResult.CONNECTION_ERROR); //Close enough entity.setAccessType(IndexerApiAccessType.valueOf(oldIndexerApiAccesses.getString("type").toUpperCase())); entityManager.persist(entity); progressLogger.lightUpdate(); if (countMigrated++ % 50 == 0) { entityManager.flush(); entityManager.clear(); } } progressLogger.stop(); statement.close(); entityManager.flush(); entityManager.clear(); eventPublisher.publishEvent(new MigrationMessageEvent("Successfully migrated indexer API accesses from old database")); logger.info("Successfully migrated indexer API accesses from old database"); }
#vulnerable code private void migrateIndexerApiAccesses(Map<Integer, IndexerEntity> oldIdToIndexersMap) throws SQLException { Statement statement = connection.createStatement(); int countIndexerApiAccesses = getCount(statement, "INDEXERAPIACCESS"); logger.info("Migrating {} indexer API accesses from old database", countIndexerApiAccesses); eventPublisher.publishEvent(new MigrationMessageEvent("Migrating " + countIndexerApiAccesses + " indexer API access entries")); ResultSet oldIndexerApiAccesses = statement.executeQuery("SELECT * FROM INDEXERAPIACCESS"); int countMigrated = 1; IndexerApiAccessEntity entity; ProgressLogger progressLogger = new ProgressLogger(logger, 5, TimeUnit.SECONDS); progressLogger.expectedUpdates = countIndexerApiAccesses; progressLogger.start(); while (oldIndexerApiAccesses.next()) { entity = new IndexerApiAccessEntity(); entity.setIndexer(oldIdToIndexersMap.get(oldIndexerApiAccesses.getInt("indexer_id"))); entity.setTime(oldIndexerApiAccesses.getTimestamp("time").toInstant()); Object responseTime = oldIndexerApiAccesses.getObject("response_time"); entity.setResponseTime(responseTime != null ? ((Integer) responseTime).longValue() : null); String error = oldIndexerApiAccesses.getString("error"); entity.setError(error != null ? error.substring(0, Math.min(4000, error.length())) : null); entity.setAccessType(null); entity.setResult(oldIndexerApiAccesses.getBoolean("response_successful") ? IndexerAccessResult.SUCCESSFUL : IndexerAccessResult.CONNECTION_ERROR); //Close enough entity.setAccessType(IndexerApiAccessType.valueOf(oldIndexerApiAccesses.getString("type").toUpperCase())); entityManager.persist(entity); progressLogger.lightUpdate(); if (countMigrated++ % 50 == 0) { entityManager.flush(); entityManager.clear(); } } progressLogger.stop(); statement.close(); entityManager.flush(); entityManager.clear(); eventPublisher.publishEvent(new MigrationMessageEvent("Successfully migrated indexer API accesses from old database")); logger.info("Successfully migrated indexer API accesses from old database"); } #location 23 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
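Note: the migration patch routes the column through a timestampToInstant(String) helper instead of calling getTimestamp("time").toInstant() directly, which dereferences null when the column is NULL. The helper's body is not shown in this file; a plausible sketch under that assumption:

import java.sql.Timestamp;
import java.time.Instant;

public class TimestampSketch {
    // Hypothetical helper in the spirit of the timestampToInstant call above:
    // a null column value short-circuits instead of reaching toInstant().
    static Instant timestampToInstant(String value) {
        return value == null ? null : Timestamp.valueOf(value).toInstant();
    }

    public static void main(String[] args) {
        System.out.println(timestampToInstant("2017-11-09 12:30:00")); // an Instant
        System.out.println(timestampToInstant(null));                  // null, no NPE
    }
}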
#fixed code @Override public void buildVariablesFor(AbstractBuild build, Map<String, String> variables) { final Executor executor = build.getExecutor(); if (executor != null && executor.getOwner() instanceof DockerComputer) { final DockerComputer dockerComputer = (DockerComputer) executor.getOwner(); variables.put("DOCKER_CONTAINER_ID", dockerComputer.getContainerId()); variables.put("JENKINS_CLOUD_ID", dockerComputer.getCloudId()); final DockerCloud cloud = dockerComputer.getCloud(); if (cloud != null && cloud.isExposeDockerHost()) { //replace http:// and https:// from docker-java to tcp:// String dockerHost = cloud.getDockerHost().getUri(); if (dockerHost.startsWith("unix:")) { dockerHost = "tcp:" + dockerHost.substring(5); } variables.put("DOCKER_HOST", dockerHost); } } }
#vulnerable code @Override public void buildVariablesFor(AbstractBuild build, Map<String, String> variables) { final Executor executor = build.getExecutor(); if (executor != null && executor.getOwner() instanceof DockerComputer) { final DockerComputer dockerComputer = (DockerComputer) executor.getOwner(); variables.put("DOCKER_CONTAINER_ID", dockerComputer.getContainerId()); variables.put("JENKINS_CLOUD_ID", dockerComputer.getCloudId()); final DockerCloud cloud = dockerComputer.getCloud(); if (cloud.isExposeDockerHost()) { //replace http:// and https:// from docker-java to tcp:// String dockerHost = cloud.getDockerHost().getUri(); if (dockerHost.startsWith("unix:")) { dockerHost = "tcp:" + dockerHost.substring(5); } variables.put("DOCKER_HOST", dockerHost); } } } #location 10 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code
@Test
public void testAddingVmargsInBeforeContainerCreated() throws IOException, InterruptedException {
    // Given
    final String vmargs = "-Dhttp.proxyPort=8080";
    final DockerComputerJNLPConnector connector = new DockerComputerJNLPConnector(new JNLPLauncher(null, vmargs));
    final CreateContainerCmd createCmd = mock(CreateContainerCmd.class);
    final Map<String, String> containerLabels = new TreeMap<>();
    when(createCmd.getLabels()).thenReturn(containerLabels);
    DockerTemplate.setNodeNameInContainerConfig(createCmd, "nodeName");
    // When
    connector.beforeContainerCreated(null, null, createCmd);
    // Then
    verify(createCmd, times(1)).withEnv(new String[]{
            "JAVA_OPT=" + vmargs
    });
}
#vulnerable code
@Test
public void testAddingVmargsInBeforeContainerCreated() throws IOException, InterruptedException {
    String vmargs = "-Dhttp.proxyPort=8080";
    DockerComputerJNLPConnector connector = new DockerComputerJNLPConnector(new JNLPLauncher(null, vmargs));
    CreateContainerCmd createCmd = new CreateContainerCmdImpl(createContainerCmd -> null, "hello-world");
    createCmd.withName("container-name");
    connector.beforeContainerCreated(null, null, createCmd);
    String[] env = createCmd.getEnv();
    assertNotNull("Environment variable is expected", env);
    assertEquals("Environment variable is expected", 1, env.length);
    assertTrue("Original environment variable is not found", env[0].endsWith(vmargs));
}
#location 11
#vulnerability type RESOURCE_LEAK
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code
@Test
public void testKeepingEvnInBeforeContainerCreated() throws IOException, InterruptedException {
    // Given
    final String env1 = "ENV1=val1";
    final String vmargs = "-Dhttp.proxyPort=8080";
    final DockerComputerJNLPConnector connector = new DockerComputerJNLPConnector(new JNLPLauncher(null, vmargs));
    final CreateContainerCmd createCmd = mock(CreateContainerCmd.class);
    final Map<String, String> containerLabels = new TreeMap<>();
    when(createCmd.getLabels()).thenReturn(containerLabels);
    DockerTemplate.setNodeNameInContainerConfig(createCmd, "nodeName");
    when(createCmd.getEnv()).thenReturn(new String[]{
            env1
    });
    // When
    connector.beforeContainerCreated(null, null, createCmd);
    // Then
    verify(createCmd, times(1)).withEnv(new String[]{
            env1,
            "JAVA_OPT=" + vmargs
    });
}
#vulnerable code
@Test
public void testKeepingEvnInBeforeContainerCreated() throws IOException, InterruptedException {
    String env1 = "ENV1=val1";
    DockerComputerJNLPConnector connector = new DockerComputerJNLPConnector(new JNLPLauncher(null, "-Dhttp.proxyPort=8080"));
    CreateContainerCmd createCmd = new CreateContainerCmdImpl(createContainerCmd -> null, "hello-world");
    createCmd.withName("container-name").withEnv(env1);
    connector.beforeContainerCreated(null, null, createCmd);
    String[] env = createCmd.getEnv();
    assertNotNull("Environment variables are expected", env);
    assertEquals("Environment variables are expected", 2, env.length);
    assertTrue("Original environment variable is not found", Arrays.asList(env).contains(env1));
}
#location 11
#vulnerability type RESOURCE_LEAK
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code
private void pullImage(DockerTemplate dockerTemplate) throws IOException {
    final String imageName = dockerTemplate.getDockerTemplateBase().getImage();
    if (shouldPullImage(imageName, dockerTemplate.getPullStrategy())) {
        LOGGER.info("Pulling image '{}'. This may take awhile...", imageName);
        long startTime = System.currentTimeMillis();
        PullImageCmd imgCmd = getClient().pullImageCmd(imageName);
        final DockerRegistryEndpoint registry = dockerTemplate.getRegistry();
        setRegistryAuthentication(imgCmd, registry);
        imgCmd.exec(new PullImageResultCallback()).awaitSuccess();
        long pullTime = System.currentTimeMillis() - startTime;
        LOGGER.info("Finished pulling image '{}', took {} ms", imageName, pullTime);
    }
}
#vulnerable code
private void pullImage(DockerTemplate dockerTemplate) throws IOException {
    final String imageName = dockerTemplate.getDockerTemplateBase().getImage();
    if (shouldPullImage(imageName, dockerTemplate.getPullStrategy())) {
        LOGGER.info("Pulling image '{}'. This may take awhile...", imageName);
        long startTime = System.currentTimeMillis();
        PullImageCmd imgCmd = getClient().pullImageCmd(imageName);
        final DockerRegistryEndpoint registry = dockerTemplate.getRegistry();
        if (registry == null) {
            DockerRegistryToken token = registry.getToken(null);
            AuthConfig auth = new AuthConfig()
                    .withRegistryAddress(registry.getUrl())
                    .withEmail(token.getEmail())
                    .withRegistrytoken(token.getToken());
            imgCmd.withAuthConfig(auth);
        }
        imgCmd.exec(new PullImageResultCallback()).awaitSuccess();
        long pullTime = System.currentTimeMillis() - startTime;
        LOGGER.info("Finished pulling image '{}', took {} ms", imageName, pullTime);
    }
}
#location 13
#vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code
public RpcResponse send(RpcRequest request) throws Exception {
    EventLoopGroup group = new NioEventLoopGroup();
    try {
        Bootstrap bootstrap = new Bootstrap();
        bootstrap.group(group)
                .channel(NioSocketChannel.class)
                .handler(new ChannelInitializer<SocketChannel>() {
                    @Override
                    public void initChannel(SocketChannel channel) throws Exception {
                        channel.pipeline().addLast(new RpcEncoder(RpcRequest.class)) // encode the RPC request (so it can be sent)
                                .addLast(new RpcDecoder(RpcResponse.class)) // decode the RPC response (so it can be handled)
                                .addLast(RpcClient.this); // use RpcClient to send the RPC request
                    }
                })
                .option(ChannelOption.SO_TIMEOUT, timeout)
                .option(ChannelOption.SO_KEEPALIVE, true);
        ChannelFuture future = bootstrap.connect(host, port).sync();
        future.channel().writeAndFlush(request).sync();
        synchronized (obj) {
            obj.wait(); // no response received yet, so make the thread wait
        }
        if (response != null) {
            future.channel().closeFuture().sync();
        }
        return response;
    } finally {
        group.shutdownGracefully();
    }
}
#vulnerable code
public RpcResponse send(RpcRequest request) throws Exception {
    EventLoopGroup group = new NioEventLoopGroup();
    try {
        Bootstrap bootstrap = new Bootstrap();
        bootstrap.group(group).channel(NioSocketChannel.class).handler(new ChannelInitializer<SocketChannel>() {
            @Override
            public void initChannel(SocketChannel channel) throws Exception {
                channel.pipeline().addLast(new RpcEncoder(RpcRequest.class)) // encode the RPC request (so it can be sent)
                        .addLast(new RpcDecoder(RpcResponse.class)) // decode the RPC response (so it can be handled)
                        .addLast(RpcClient.this); // use RpcClient to send the RPC request
            }
        }).option(ChannelOption.SO_KEEPALIVE, true);
        ChannelFuture future = bootstrap.connect(host, port).sync();
        future.channel().writeAndFlush(request).sync();
        synchronized (obj) {
            obj.wait(); // no response received yet, so make the thread wait
        }
        if (response != null) {
            future.channel().closeFuture().sync();
        }
        return response;
    } finally {
        group.shutdownGracefully();
    }
}
#location 24
#vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code
@Test
public void testDecompressionWithZLIB() throws Exception {
    // Build a datagram packet.
    DatagramPacket gelfMessage = GELFTestHelper.buildZLIBCompressedDatagramPacket(this.originalMessage);
    // Let the decompression take place.
    SimpleGELFClientHandler handler = new SimpleGELFClientHandler(gelfMessage, "foo");
    assertEquals(handler.getClientMessage(), this.originalMessage);
}
#vulnerable code
@Test
public void testDecompressionWithZLIB() throws Exception {
    // ZLIB compress message.
    byte[] compressMe = this.originalMessage.getBytes();
    byte[] compressedMessage = new byte[compressMe.length];
    Deflater compressor = new Deflater();
    compressor.setInput(compressMe);
    compressor.finish();
    compressor.deflate(compressedMessage);
    // Build a datagram packet.
    DatagramPacket gelfMessage = new DatagramPacket(compressedMessage, compressedMessage.length);
    // Let the decompression take place.
    SimpleGELFClientHandler handler = new SimpleGELFClientHandler(gelfMessage, "foo");
    assertEquals(handler.getClientMessage(), this.originalMessage);
}
#location 9
#vulnerability type RESOURCE_LEAK
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code
public static double heightDegrees(int n) {
    if (hashHeightCache[n - 1] == null) {
        double a;
        if (n % 2 == 0)
            a = -1;
        else
            a = -0.5;
        double result = 90 / Math.pow(2, 2.5 * n + a);
        hashHeightCache[n - 1] = result;
    }
    return hashHeightCache[n - 1];
}
#vulnerable code
public static double heightDegrees(int n) {
    if (hashHeightCache.get(n) == null) {
        double a;
        if (n % 2 == 0)
            a = -1;
        else
            a = -0.5;
        double result = 90 / Math.pow(2, 2.5 * n + a);
        hashHeightCache.put(n, result);
    }
    return hashHeightCache.get(n);
}
#location 11
#vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code
public void add(double lat, double lon, long time, T t, long expiryTime) {
    String hash = GeoHash.encodeHash(lat, lon);
    // full hash length is 12 so this will insert 12 entries
    for (int i = 1; i <= hash.length(); i++) {
        long key = Base32.decodeBase32(hash.substring(0, i));
        if (map.get(key) == null) {
            map.put(key, new ConcurrentSkipListMap<Long, Info<T>>());
        }
        map.get(key).put(time, new Info<T>(key, lat, lon, time, t));
    }
}
#vulnerable code
public void add(double lat, double lon, long time, T t, long expiryTime) {
    String hash = GeoHash.encodeHash(lat, lon);
    for (int i = 1; i <= hash.length(); i++) {
        String key = hash.substring(0, i);
        if (map.get(key) == null) {
            map.put(key, Maps.<Long, T> newTreeMap());
        }
        map.get(key).put(time, t);
    }
}
#location 8
#vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code
@Test
public void testDeleteViewIndexSequences() throws Exception {
    createBaseTable(tableName, false, null, null);
    Connection conn1 = getConnection();
    Connection conn2 = getConnection();
    String viewName = schemaName + "." + VIEW_NAME;
    conn1.createStatement().execute("CREATE VIEW " + viewName + " AS SELECT * FROM " + tableName);
    conn1.createStatement().execute("CREATE INDEX " + indexName + " ON " + viewName + " (v1)");
    conn2.createStatement().executeQuery("SELECT * FROM " + tableName).next();
    String query = "SELECT sequence_schema, sequence_name, current_value, increment_by FROM SYSTEM.\"SEQUENCE\" WHERE sequence_schema like '%" + schemaName + "%'";
    ResultSet rs = conn1.prepareStatement(query).executeQuery();
    assertTrue(rs.next());
    assertEquals(MetaDataUtil.getViewIndexSequenceSchemaName(PNameFactory.newName(tableName), isNamespaceMapped),
            rs.getString("sequence_schema"));
    assertEquals(MetaDataUtil.getViewIndexSequenceName(PNameFactory.newName(tableName), null, isNamespaceMapped),
            rs.getString("sequence_name"));
    assertEquals(-32767, rs.getInt("current_value"));
    assertEquals(1, rs.getInt("increment_by"));
    assertFalse(rs.next());
    HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
    conn1.createStatement().execute("DROP VIEW " + viewName);
    conn1.createStatement().execute("DROP TABLE " + tableName);
    admin = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
    assertFalse("View index table should be deleted.", admin.tableExists(TableName.valueOf(viewIndexPhysicalTableName)));
    rs = conn2.createStatement().executeQuery("SELECT "
            + PhoenixDatabaseMetaData.SEQUENCE_SCHEMA + ","
            + PhoenixDatabaseMetaData.SEQUENCE_NAME
            + " FROM " + PhoenixDatabaseMetaData.SYSTEM_SEQUENCE);
    assertFalse("View index sequences should be deleted.", rs.next());
}
#vulnerable code
@Test
public void testDeleteViewIndexSequences() throws Exception {
    createBaseTable(tableName, false, null, null);
    Connection conn1 = getConnection();
    Connection conn2 = getConnection();
    conn1.createStatement().execute("CREATE VIEW " + VIEW_NAME + " AS SELECT * FROM " + tableName);
    conn1.createStatement().execute("CREATE INDEX " + indexName + " ON " + VIEW_NAME + " (v1)");
    conn2.createStatement().executeQuery("SELECT * FROM " + tableName).next();
    HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
    conn1.createStatement().execute("DROP VIEW " + VIEW_NAME);
    conn1.createStatement().execute("DROP TABLE " + tableName);
    admin = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
    assertFalse("View index table should be deleted.", admin.tableExists(TableName.valueOf(viewIndexPhysicalTableName)));
    ResultSet rs = conn2.createStatement().executeQuery("SELECT "
            + PhoenixDatabaseMetaData.SEQUENCE_SCHEMA + ","
            + PhoenixDatabaseMetaData.SEQUENCE_NAME
            + " FROM " + PhoenixDatabaseMetaData.SYSTEM_SEQUENCE);
    assertFalse("View index sequences should be deleted.", rs.next());
}
#location 13
#vulnerability type RESOURCE_LEAK
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code
private static Object[] coerceToNewLength(PDataType baseType, Object[] elements, int maxLength) {
    Object[] resizedElements = new Object[elements.length];
    for (int i = 0; i < elements.length; i++) {
        Integer length = baseType.getMaxLength(elements[i]);
        if (length != null) {
            if (length == maxLength) {
                resizedElements[i] = elements[i];
            } else {
                resizedElements[i] = baseType.pad(elements[i], maxLength);
            }
        } else {
            resizedElements[i] = baseType.pad(elements[i], maxLength);
        }
    }
    return resizedElements;
}
#vulnerable code
private static Object[] coerceToNewLength(PDataType baseType, Object[] elements, int maxLength) {
    Object[] resizedElements = new Object[elements.length];
    for (int i = 0; i < elements.length; i++) {
        int length = baseType.getMaxLength(elements[i]);
        if (length == maxLength) {
            resizedElements[i] = elements[i];
        } else {
            resizedElements[i] = baseType.pad(elements[i], maxLength);
        }
    }
    return resizedElements;
}
#location 4
#vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code
private PTable doGetTable(byte[] tenantId, byte[] schemaName, byte[] tableName, long clientTimeStamp,
        RowLock rowLock, int clientVersion, boolean skipAddingIndexes, boolean skipAddingParentColumns,
        PTable lockedAncestorTable) throws IOException, SQLException {
    Region region = env.getRegion();
    final byte[] key = SchemaUtil.getTableKey(tenantId, schemaName, tableName);
    // if this region doesn't contain the metadata rows look up the table by using PhoenixRuntime.getTable
    if (!region.getRegionInfo().containsRow(key)) {
        Properties props = new Properties();
        if (tenantId != null) {
            props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, Bytes.toString(tenantId));
        }
        if (clientTimeStamp != HConstants.LATEST_TIMESTAMP) {
            props.setProperty("CurrentSCN", Long.toString(clientTimeStamp));
        }
        try (PhoenixConnection connection =
                QueryUtil.getConnectionOnServer(props, env.getConfiguration()).unwrap(PhoenixConnection.class)) {
            ConnectionQueryServices queryServices = connection.getQueryServices();
            MetaDataMutationResult result =
                    queryServices.getTable(PNameFactory.newName(tenantId), schemaName, tableName,
                        HConstants.LATEST_TIMESTAMP, clientTimeStamp, skipAddingIndexes,
                        skipAddingParentColumns, lockedAncestorTable);
            return result.getTable();
        } catch (ClassNotFoundException e) {
        }
    }
    ImmutableBytesPtr cacheKey = new ImmutableBytesPtr(key);
    // Ask Lars about the expense of this call - if we don't take the lock, we still won't get
    // partial results
    // get the co-processor environment
    // TODO: check that key is within region.getStartKey() and region.getEndKey()
    // and return special code to force client to lookup region from meta.
    /*
     * Lock directly on key, though it may be an index table. This will just prevent a table
     * from getting rebuilt too often.
     */
    final boolean wasLocked = (rowLock != null);
    try {
        if (!wasLocked) {
            rowLock = acquireLock(region, key, null);
        }
        PTable table = getTableFromCache(cacheKey, clientTimeStamp, clientVersion, skipAddingIndexes,
                skipAddingParentColumns, lockedAncestorTable);
        table = modifyIndexStateForOldClient(clientVersion, table);
        // We only cache the latest, so we'll end up building the table with every call if the
        // client connection has specified an SCN.
        // TODO: If we indicate to the client that we're returning an older version, but there's a
        // newer version available, the client
        // can safely not call this, since we only allow modifications to the latest.
        if (table != null && table.getTimeStamp() < clientTimeStamp) {
            // Table on client is up-to-date with table on server, so just return
            if (isTableDeleted(table)) {
                return null;
            }
            return table;
        }
        // Query for the latest table first, since it's not cached
        table = buildTable(key, cacheKey, region, HConstants.LATEST_TIMESTAMP, clientVersion,
                skipAddingIndexes, skipAddingParentColumns, lockedAncestorTable);
        if ((table != null && table.getTimeStamp() < clientTimeStamp) ||
                (blockWriteRebuildIndex && table.getIndexDisableTimestamp() > 0)) {
            return table;
        }
        // Otherwise, query for an older version of the table - it won't be cached
        table = buildTable(key, cacheKey, region, clientTimeStamp, clientVersion,
                skipAddingIndexes, skipAddingParentColumns, lockedAncestorTable);
        return table;
    } finally {
        if (!wasLocked && rowLock != null) rowLock.release();
    }
}
#vulnerable code
private PTable doGetTable(byte[] tenantId, byte[] schemaName, byte[] tableName, long clientTimeStamp,
        RowLock rowLock, int clientVersion, boolean skipAddingIndexes, boolean skipAddingParentColumns,
        PTable lockedAncestorTable) throws IOException, SQLException {
    Region region = env.getRegion();
    final byte[] key = SchemaUtil.getTableKey(tenantId, schemaName, tableName);
    // if this region doesn't contain the metadata rows look up the table by using PhoenixRuntime.getTable
    if (!region.getRegionInfo().containsRow(key)) {
        Properties props = new Properties();
        if (tenantId != null) {
            props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, Bytes.toString(tenantId));
        }
        if (clientTimeStamp != HConstants.LATEST_TIMESTAMP) {
            props.setProperty("CurrentSCN", Long.toString(clientTimeStamp));
        }
        try (PhoenixConnection connection =
                QueryUtil.getConnectionOnServer(props, env.getConfiguration()).unwrap(PhoenixConnection.class)) {
            ConnectionQueryServices queryServices = connection.getQueryServices();
            MetaDataMutationResult result =
                    queryServices.getTable(PNameFactory.newName(tenantId), schemaName, tableName,
                        HConstants.LATEST_TIMESTAMP, clientTimeStamp, skipAddingIndexes,
                        skipAddingParentColumns, lockedAncestorTable);
            return result.getTable();
        } catch (ClassNotFoundException e) {
        }
    }
    ImmutableBytesPtr cacheKey = new ImmutableBytesPtr(key);
    // Ask Lars about the expense of this call - if we don't take the lock, we still won't get
    // partial results
    // get the co-processor environment
    // TODO: check that key is within region.getStartKey() and region.getEndKey()
    // and return special code to force client to lookup region from meta.
    /*
     * Lock directly on key, though it may be an index table. This will just prevent a table
     * from getting rebuilt too often.
     */
    final boolean wasLocked = (rowLock != null);
    try {
        if (!wasLocked) {
            rowLock = acquireLock(region, key, null);
        }
        PTable table = getTableFromCache(cacheKey, clientTimeStamp, clientVersion, skipAddingIndexes,
                skipAddingParentColumns, lockedAncestorTable);
        // We only cache the latest, so we'll end up building the table with every call if the
        // client connection has specified an SCN.
        // TODO: If we indicate to the client that we're returning an older version, but there's a
        // newer version available, the client
        // can safely not call this, since we only allow modifications to the latest.
        if (table != null && table.getTimeStamp() < clientTimeStamp) {
            // Table on client is up-to-date with table on server, so just return
            if (isTableDeleted(table)) {
                return null;
            }
            return table;
        }
        // Query for the latest table first, since it's not cached
        table = buildTable(key, cacheKey, region, HConstants.LATEST_TIMESTAMP, clientVersion,
                skipAddingIndexes, skipAddingParentColumns, lockedAncestorTable);
        if ((table != null && table.getTimeStamp() < clientTimeStamp) ||
                (blockWriteRebuildIndex && table.getIndexDisableTimestamp() > 0)) {
            return table;
        }
        // Otherwise, query for an older version of the table - it won't be cached
        table = buildTable(key, cacheKey, region, clientTimeStamp, clientVersion,
                skipAddingIndexes, skipAddingParentColumns, lockedAncestorTable);
        return table;
    } finally {
        if (!wasLocked && rowLock != null) rowLock.release();
    }
}
#location 63
#vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code
@Override
public void close() throws SQLException {
    if (closed) {
        return;
    }
    synchronized (this) {
        if (closed) {
            return;
        }
        closed = true;
        SQLException sqlE = null;
        try {
            // Attempt to return any unused sequences.
            if (connection != null) returnAllSequences(this.sequenceMap);
        } catch (SQLException e) {
            sqlE = e;
        } finally {
            try {
                // Clear any client-side caches.
                statsManager.clearStats();
            } catch (SQLException e) {
                if (sqlE == null) {
                    sqlE = e;
                } else {
                    sqlE.setNextException(e);
                }
            } finally {
                try {
                    childServices.clear();
                    latestMetaData = null;
                    if (connection != null) connection.close();
                } catch (IOException e) {
                    if (sqlE == null) {
                        sqlE = ServerUtil.parseServerException(e);
                    } else {
                        sqlE.setNextException(ServerUtil.parseServerException(e));
                    }
                } finally {
                    try {
                        super.close();
                    } catch (SQLException e) {
                        if (sqlE == null) {
                            sqlE = e;
                        } else {
                            sqlE.setNextException(e);
                        }
                    } finally {
                        if (sqlE != null) {
                            throw sqlE;
                        }
                    }
                }
            }
        }
    }
}
#vulnerable code
@Override
public void close() throws SQLException {
    SQLException sqlE = null;
    try {
        // Attempt to return any unused sequences.
        returnAllSequences(this.sequenceMap);
    } catch (SQLException e) {
        sqlE = e;
    } finally {
        try {
            // Clear any client-side caches.
            statsManager.clearStats();
        } catch (SQLException e) {
            if (sqlE == null) {
                sqlE = e;
            } else {
                sqlE.setNextException(e);
            }
        } finally {
            try {
                childServices.clear();
                latestMetaData = null;
                connection.close();
            } catch (IOException e) {
                if (sqlE == null) {
                    sqlE = ServerUtil.parseServerException(e);
                } else {
                    sqlE.setNextException(ServerUtil.parseServerException(e));
                }
            } finally {
                try {
                    super.close();
                } catch (SQLException e) {
                    if (sqlE == null) {
                        sqlE = e;
                    } else {
                        sqlE.setNextException(e);
                    }
                } finally {
                    if (sqlE != null) {
                        throw sqlE;
                    }
                }
            }
        }
    }
}
#location 6
#vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code
@Override
public Expression visitLeave(ArrayConstructorNode node, List<Expression> children) throws SQLException {
    boolean isChildTypeUnknown = false;
    Expression arrayElemChild = null;
    PDataType arrayElemDataType = children.get(0).getDataType();
    for (int i = 0; i < children.size(); i++) {
        Expression child = children.get(i);
        PDataType childType = child.getDataType();
        if (childType == null) {
            isChildTypeUnknown = true;
        } else if (arrayElemDataType == null) {
            arrayElemDataType = childType;
            isChildTypeUnknown = true;
            arrayElemChild = child;
        } else if (arrayElemDataType == childType || childType.isCoercibleTo(arrayElemDataType)) {
            continue;
        } else if (arrayElemDataType.isCoercibleTo(childType)) {
            arrayElemChild = child;
            arrayElemDataType = childType;
        } else {
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_CONVERT_TYPE)
                    .setMessage("Case expressions must have common type: " + arrayElemDataType
                            + " cannot be coerced to " + childType).build().buildException();
        }
    }
    // If we found an "unknown" child type and the return type is a number
    // make the return type be the most general number type of DECIMAL.
    if (isChildTypeUnknown && arrayElemDataType != null && arrayElemDataType.isCoercibleTo(PDataType.DECIMAL)) {
        arrayElemDataType = PDataType.DECIMAL;
    }
    final PDataType theArrayElemDataType = arrayElemDataType;
    for (int i = 0; i < node.getChildren().size(); i++) {
        ParseNode childNode = node.getChildren().get(i);
        if (childNode instanceof BindParseNode) {
            context.getBindManager().addParamMetaData((BindParseNode)childNode,
                    arrayElemDataType == arrayElemChild.getDataType() ? arrayElemChild :
                        new DelegateDatum(arrayElemChild) {
                            @Override
                            public PDataType getDataType() {
                                return theArrayElemDataType;
                            }
                        });
        }
    }
    ImmutableBytesWritable ptr = context.getTempPtr();
    Object[] elements = new Object[children.size()];
    ArrayConstructorExpression arrayExpression = new ArrayConstructorExpression(children, arrayElemDataType);
    if (ExpressionUtil.isConstant(arrayExpression)) {
        for (int i = 0; i < children.size(); i++) {
            Expression child = children.get(i);
            child.evaluate(null, ptr);
            Object value = arrayElemDataType.toObject(ptr, child.getDataType(), child.getSortOrder());
            elements[i] = LiteralExpression.newConstant(value, child.getDataType(), child.isDeterministic()).getValue();
        }
        Object value = PArrayDataType.instantiatePhoenixArray(arrayElemDataType, elements);
        return LiteralExpression.newConstant(value,
                PDataType.fromTypeId(arrayElemDataType.getSqlType() + PDataType.ARRAY_TYPE_BASE), true);
    }
    return wrapGroupByExpression(arrayExpression);
}
#vulnerable code
@Override
public Expression visitLeave(ArrayConstructorNode node, List<Expression> children) throws SQLException {
    boolean isChildTypeUnknown = false;
    Expression arrayElemChild = null;
    PDataType arrayElemDataType = children.get(0).getDataType();
    for (int i = 0; i < children.size(); i++) {
        Expression child = children.get(i);
        PDataType childType = child.getDataType();
        if (childType == null) {
            isChildTypeUnknown = true;
        } else if (arrayElemDataType == null) {
            arrayElemDataType = childType;
            isChildTypeUnknown = true;
            arrayElemChild = child;
        } else if (arrayElemDataType == childType || childType.isCoercibleTo(arrayElemDataType)) {
            continue;
        } else if (arrayElemDataType.isCoercibleTo(childType)) {
            arrayElemChild = child;
            arrayElemDataType = childType;
        } else {
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_CONVERT_TYPE)
                    .setMessage("Case expressions must have common type: " + arrayElemDataType
                            + " cannot be coerced to " + childType).build().buildException();
        }
    }
    // If we found an "unknown" child type and the return type is a number
    // make the return type be the most general number type of DECIMAL.
    if (isChildTypeUnknown && arrayElemDataType != null && arrayElemDataType.isCoercibleTo(PDataType.DECIMAL)) {
        arrayElemDataType = PDataType.DECIMAL;
    }
    final PDataType theArrayElemDataType = arrayElemDataType;
    for (int i = 0; i < node.getChildren().size(); i++) {
        ParseNode childNode = node.getChildren().get(i);
        if (childNode instanceof BindParseNode) {
            context.getBindManager().addParamMetaData((BindParseNode)childNode,
                    arrayElemDataType == arrayElemChild.getDataType() ? arrayElemChild :
                        new DelegateDatum(arrayElemChild) {
                            @Override
                            public PDataType getDataType() {
                                return theArrayElemDataType;
                            }
                        });
        }
    }
    ImmutableBytesWritable ptr = context.getTempPtr();
    Object[] elements = new Object[children.size()];
    if (node.isStateless()) {
        boolean isDeterministic = true;
        for (int i = 0; i < children.size(); i++) {
            Expression child = children.get(i);
            isDeterministic &= child.isDeterministic();
            child.evaluate(null, ptr);
            Object value = arrayElemDataType.toObject(ptr, child.getDataType(), child.getSortOrder());
            elements[i] = LiteralExpression.newConstant(value, child.getDataType(), child.isDeterministic()).getValue();
        }
        Object value = PArrayDataType.instantiatePhoenixArray(arrayElemDataType, elements);
        return LiteralExpression.newConstant(value,
                PDataType.fromTypeId(arrayElemDataType.getSqlType() + PDataType.ARRAY_TYPE_BASE), isDeterministic);
    }
    ArrayConstructorExpression arrayExpression = new ArrayConstructorExpression(children, arrayElemDataType);
    return wrapGroupByExpression(arrayExpression);
}
#location 62
#vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code
@Override
public boolean next(List<Cell> results) throws IOException {
    if (indexRowKey != null &&
            singleRowRebuildReturnCode == GlobalIndexChecker.RebuildReturnCode.NO_DATA_ROW.getValue()) {
        byte[] rowCountBytes = PLong.INSTANCE.toBytes(Long.valueOf(singleRowRebuildReturnCode));
        final Cell aggKeyValue = PhoenixKeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY,
                SINGLE_COLUMN, AGG_TIMESTAMP, rowCountBytes, 0, rowCountBytes.length);
        results.add(aggKeyValue);
        return false;
    }
    Cell lastCell = null;
    int rowCount = 0;
    region.startRegionOperation();
    RegionScanner localScanner = null;
    try {
        byte[] uuidValue = ServerCacheClient.generateId();
        localScanner = getLocalScanner();
        if (localScanner == null) {
            return false;
        }
        synchronized (localScanner) {
            if (!shouldVerify()) {
                skipped = true;
                return false;
            }
            do {
                /**
                 * If region is closing and there are large number of rows being verified/rebuilt with IndexTool,
                 * not having this check will impact/delay the region closing -- affecting the availability
                 * as this method holds the read lock on the region.
                 * */
                ungroupedAggregateRegionObserver.checkForRegionClosingOrSplitting();
                List<Cell> row = new ArrayList<Cell>();
                hasMore = localScanner.nextRaw(row);
                if (!row.isEmpty()) {
                    lastCell = row.get(0); // lastCell is any cell from the last visited row
                    Put put = null;
                    Delete del = null;
                    for (Cell cell : row) {
                        if (KeyValue.Type.codeToType(cell.getTypeByte()) == KeyValue.Type.Put) {
                            if (!partialRebuild && familyMap != null && !isColumnIncluded(cell)) {
                                continue;
                            }
                            if (put == null) {
                                put = new Put(CellUtil.cloneRow(cell));
                            }
                            put.add(cell);
                        } else {
                            if (del == null) {
                                del = new Delete(CellUtil.cloneRow(cell));
                            }
                            del.addDeleteMarker(cell);
                        }
                    }
                    if (put == null && del == null) {
                        continue;
                    }
                    // Always add the put first and then delete for a given row. This simplifies the logic in
                    // IndexRegionObserver
                    if (put != null) {
                        mutations.add(put);
                    }
                    if (del != null) {
                        mutations.add(del);
                    }
                    if (!verify) {
                        if (put != null) {
                            setMutationAttributes(put, uuidValue);
                        }
                        if (del != null) {
                            setMutationAttributes(del, uuidValue);
                        }
                        uuidValue = commitIfReady(uuidValue, mutations);
                    } else {
                        byte[] dataKey = (put != null) ? put.getRow() : del.getRow();
                        prepareIndexMutations(put, del);
                        dataKeyToMutationMap.put(dataKey, new Pair<Put, Delete>(put, del));
                    }
                    rowCount++;
                }
            } while (hasMore && rowCount < pageSizeInRows);
            if (!mutations.isEmpty()) {
                if (verify) {
                    verifyAndOrRebuildIndex();
                } else {
                    ungroupedAggregateRegionObserver.checkForRegionClosingOrSplitting();
                    ungroupedAggregateRegionObserver.commitBatchWithRetries(region, mutations, blockingMemstoreSize);
                }
            }
        }
    } catch (Throwable e) {
        LOGGER.error("Exception in IndexRebuildRegionScanner for region "
                + region.getRegionInfo().getRegionNameAsString(), e);
        throw e;
    } finally {
        region.closeRegionOperation();
        mutations.clear();
        if (verify) {
            dataKeyToMutationMap.clear();
            indexKeyToMutationMap.clear();
        }
        if (localScanner != null && localScanner != innerScanner) {
            localScanner.close();
        }
    }
    if (indexRowKey != null) {
        rowCount = singleRowRebuildReturnCode;
    }
    if (minTimestamp != 0) {
        nextStartKey = ByteUtil.calculateTheClosestNextRowKeyForPrefix(CellUtil.cloneRow(lastCell));
    }
    byte[] rowCountBytes = PLong.INSTANCE.toBytes(Long.valueOf(rowCount));
    final Cell aggKeyValue;
    if (lastCell == null) {
        aggKeyValue = PhoenixKeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY,
                SINGLE_COLUMN, AGG_TIMESTAMP, rowCountBytes, 0, rowCountBytes.length);
    } else {
        aggKeyValue = PhoenixKeyValueUtil.newKeyValue(CellUtil.cloneRow(lastCell), SINGLE_COLUMN_FAMILY,
                SINGLE_COLUMN, AGG_TIMESTAMP, rowCountBytes, 0, rowCountBytes.length);
    }
    results.add(aggKeyValue);
    return hasMore || hasMoreIncr;
}
#vulnerable code
@Override
public boolean next(List<Cell> results) throws IOException {
    if (indexRowKey != null &&
            singleRowRebuildReturnCode == GlobalIndexChecker.RebuildReturnCode.NO_DATA_ROW.getValue()) {
        byte[] rowCountBytes = PLong.INSTANCE.toBytes(Long.valueOf(singleRowRebuildReturnCode));
        final Cell aggKeyValue = PhoenixKeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY,
                SINGLE_COLUMN, AGG_TIMESTAMP, rowCountBytes, 0, rowCountBytes.length);
        results.add(aggKeyValue);
        return false;
    }
    Cell lastCell = null;
    int rowCount = 0;
    region.startRegionOperation();
    RegionScanner localScanner = null;
    try {
        byte[] uuidValue = ServerCacheClient.generateId();
        localScanner = getLocalScanner();
        if (localScanner == null) {
            return false;
        }
        synchronized (localScanner) {
            if (!shouldVerify()) {
                skipped = true;
                return false;
            }
            do {
                List<Cell> row = new ArrayList<Cell>();
                hasMore = localScanner.nextRaw(row);
                if (!row.isEmpty()) {
                    lastCell = row.get(0); // lastCell is any cell from the last visited row
                    Put put = null;
                    Delete del = null;
                    for (Cell cell : row) {
                        if (KeyValue.Type.codeToType(cell.getTypeByte()) == KeyValue.Type.Put) {
                            if (!partialRebuild && familyMap != null && !isColumnIncluded(cell)) {
                                continue;
                            }
                            if (put == null) {
                                put = new Put(CellUtil.cloneRow(cell));
                            }
                            put.add(cell);
                        } else {
                            if (del == null) {
                                del = new Delete(CellUtil.cloneRow(cell));
                            }
                            del.addDeleteMarker(cell);
                        }
                    }
                    if (put == null && del == null) {
                        continue;
                    }
                    // Always add the put first and then delete for a given row. This simplifies the logic in
                    // IndexRegionObserver
                    if (put != null) {
                        mutations.add(put);
                    }
                    if (del != null) {
                        mutations.add(del);
                    }
                    if (!verify) {
                        if (put != null) {
                            setMutationAttributes(put, uuidValue);
                        }
                        if (del != null) {
                            setMutationAttributes(del, uuidValue);
                        }
                        uuidValue = commitIfReady(uuidValue, mutations);
                    } else {
                        byte[] dataKey = (put != null) ? put.getRow() : del.getRow();
                        prepareIndexMutations(put, del);
                        dataKeyToMutationMap.put(dataKey, new Pair<Put, Delete>(put, del));
                    }
                    rowCount++;
                }
            } while (hasMore && rowCount < pageSizeInRows);
            if (!mutations.isEmpty()) {
                if (verify) {
                    verifyAndOrRebuildIndex();
                } else {
                    ungroupedAggregateRegionObserver.checkForRegionClosingOrSplitting();
                    ungroupedAggregateRegionObserver.commitBatchWithRetries(region, mutations, blockingMemstoreSize);
                }
            }
        }
    } catch (Throwable e) {
        LOGGER.error("Exception in IndexRebuildRegionScanner for region "
                + region.getRegionInfo().getRegionNameAsString(), e);
        throw e;
    } finally {
        region.closeRegionOperation();
        mutations.clear();
        if (verify) {
            dataKeyToMutationMap.clear();
            indexKeyToMutationMap.clear();
        }
        if (localScanner != null && localScanner != innerScanner) {
            localScanner.close();
        }
    }
    if (indexRowKey != null) {
        rowCount = singleRowRebuildReturnCode;
    }
    if (minTimestamp != 0) {
        nextStartKey = ByteUtil.calculateTheClosestNextRowKeyForPrefix(CellUtil.cloneRow(lastCell));
    }
    byte[] rowCountBytes = PLong.INSTANCE.toBytes(Long.valueOf(rowCount));
    final Cell aggKeyValue;
    if (lastCell == null) {
        aggKeyValue = PhoenixKeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY,
                SINGLE_COLUMN, AGG_TIMESTAMP, rowCountBytes, 0, rowCountBytes.length);
    } else {
        aggKeyValue = PhoenixKeyValueUtil.newKeyValue(CellUtil.cloneRow(lastCell), SINGLE_COLUMN_FAMILY,
                SINGLE_COLUMN, AGG_TIMESTAMP, rowCountBytes, 0, rowCountBytes.length);
    }
    results.add(aggKeyValue);
    return hasMore || hasMoreIncr;
}
#location 117
#vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code
public int executeStatements(Reader reader, List<Object> binds, PrintStream out) throws IOException, SQLException {
    int bindsOffset = 0;
    int nStatements = 0;
    PhoenixStatementParser parser = new PhoenixStatementParser(reader);
    try {
        while (true) {
            PhoenixPreparedStatement stmt = new PhoenixPreparedStatement(this, parser);
            this.statements.add(stmt);
            ParameterMetaData paramMetaData = stmt.getParameterMetaData();
            for (int i = 0; i < paramMetaData.getParameterCount(); i++) {
                stmt.setObject(i+1, binds.get(bindsOffset+i));
            }
            long start = System.currentTimeMillis();
            boolean isQuery = stmt.execute();
            if (isQuery) {
                ResultSet rs = stmt.getResultSet();
                if (!rs.next()) {
                    if (out != null) {
                        out.println("no rows selected");
                    }
                } else {
                    int columnCount = 0;
                    if (out != null) {
                        ResultSetMetaData md = rs.getMetaData();
                        columnCount = md.getColumnCount();
                        for (int i = 1; i <= columnCount; i++) {
                            int displayWidth = md.getColumnDisplaySize(i);
                            String label = md.getColumnLabel(i);
                            if (md.isSigned(i)) {
                                out.print(displayWidth < label.length() ? label.substring(0,displayWidth) : Strings.padStart(label, displayWidth, ' '));
                                out.print(' ');
                            } else {
                                out.print(displayWidth < label.length() ? label.substring(0,displayWidth) : Strings.padEnd(md.getColumnLabel(i), displayWidth, ' '));
                                out.print(' ');
                            }
                        }
                        out.println();
                        for (int i = 1; i <= columnCount; i++) {
                            int displayWidth = md.getColumnDisplaySize(i);
                            out.print(Strings.padStart("", displayWidth,'-'));
                            out.print(' ');
                        }
                        out.println();
                    }
                    do {
                        if (out != null) {
                            ResultSetMetaData md = rs.getMetaData();
                            for (int i = 1; i <= columnCount; i++) {
                                int displayWidth = md.getColumnDisplaySize(i);
                                String value = rs.getString(i);
                                String valueString = value == null ? QueryConstants.NULL_DISPLAY_TEXT : value;
                                if (md.isSigned(i)) {
                                    out.print(Strings.padStart(valueString, displayWidth, ' '));
                                } else {
                                    out.print(Strings.padEnd(valueString, displayWidth, ' '));
                                }
                                out.print(' ');
                            }
                            out.println();
                        }
                    } while (rs.next());
                }
            } else if (out != null) {
                int updateCount = stmt.getUpdateCount();
                if (updateCount >= 0) {
                    out.println((updateCount == 0 ? "no" : updateCount) + (updateCount == 1 ? " row " : " rows ") + stmt.getUpdateOperation().toString());
                }
            }
            bindsOffset += paramMetaData.getParameterCount();
            double elapsedDuration = ((System.currentTimeMillis() - start) / 1000.0);
            out.println("Time: " + elapsedDuration + " sec(s)\n");
            nStatements++;
        }
    } catch (EOFException e) {
    }
    return nStatements;
}
#vulnerable code
public int executeStatements(Reader reader, List<Object> binds, PrintStream out) throws IOException, SQLException {
    int bindsOffset = 0;
    int nStatements = 0;
    PhoenixStatementParser parser = new PhoenixStatementParser(reader);
    try {
        while (true) {
            PhoenixPreparedStatement stmt = new PhoenixPreparedStatement(this, parser);
            ParameterMetaData paramMetaData = stmt.getParameterMetaData();
            for (int i = 0; i < paramMetaData.getParameterCount(); i++) {
                stmt.setObject(i+1, binds.get(bindsOffset+i));
            }
            long start = System.currentTimeMillis();
            boolean isQuery = stmt.execute();
            if (isQuery) {
                ResultSet rs = stmt.getResultSet();
                if (!rs.next()) {
                    if (out != null) {
                        out.println("no rows selected");
                    }
                } else {
                    int columnCount = 0;
                    if (out != null) {
                        ResultSetMetaData md = rs.getMetaData();
                        columnCount = md.getColumnCount();
                        for (int i = 1; i <= columnCount; i++) {
                            int displayWidth = md.getColumnDisplaySize(i);
                            String label = md.getColumnLabel(i);
                            if (md.isSigned(i)) {
                                out.print(displayWidth < label.length() ? label.substring(0,displayWidth) : Strings.padStart(label, displayWidth, ' '));
                                out.print(' ');
                            } else {
                                out.print(displayWidth < label.length() ? label.substring(0,displayWidth) : Strings.padEnd(md.getColumnLabel(i), displayWidth, ' '));
                                out.print(' ');
                            }
                        }
                        out.println();
                        for (int i = 1; i <= columnCount; i++) {
                            int displayWidth = md.getColumnDisplaySize(i);
                            out.print(Strings.padStart("", displayWidth,'-'));
                            out.print(' ');
                        }
                        out.println();
                    }
                    do {
                        if (out != null) {
                            ResultSetMetaData md = rs.getMetaData();
                            for (int i = 1; i <= columnCount; i++) {
                                int displayWidth = md.getColumnDisplaySize(i);
                                String value = rs.getString(i);
                                String valueString = value == null ? QueryConstants.NULL_DISPLAY_TEXT : value;
                                if (md.isSigned(i)) {
                                    out.print(Strings.padStart(valueString, displayWidth, ' '));
                                } else {
                                    out.print(Strings.padEnd(valueString, displayWidth, ' '));
                                }
                                out.print(' ');
                            }
                            out.println();
                        }
                    } while (rs.next());
                }
            } else if (out != null) {
                int updateCount = stmt.getUpdateCount();
                if (updateCount >= 0) {
                    out.println((updateCount == 0 ? "no" : updateCount) + (updateCount == 1 ? " row " : " rows ") + stmt.getUpdateOperation().toString());
                }
            }
            bindsOffset += paramMetaData.getParameterCount();
            double elapsedDuration = ((System.currentTimeMillis() - start) / 1000.0);
            out.println("Time: " + elapsedDuration + " sec(s)\n");
            nStatements++;
        }
    } catch (EOFException e) {
    }
    return nStatements;
}
#location 62
#vulnerability type RESOURCE_LEAK
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code
@Override
public void modifyTable(byte[] tableName, HTableDescriptor newDesc) throws IOException, InterruptedException, TimeoutException {
    try (HBaseAdmin admin = new HBaseAdmin(config)) {
        if (!allowOnlineTableSchemaUpdate()) {
            admin.disableTable(tableName);
            admin.modifyTable(tableName, newDesc);
            admin.enableTable(tableName);
        } else {
            admin.modifyTable(tableName, newDesc);
            pollForUpdatedTableDescriptor(admin, newDesc, tableName);
        }
    }
}
#vulnerable code
@Override
public void modifyTable(byte[] tableName, HTableDescriptor newDesc) throws IOException, InterruptedException, TimeoutException {
    HBaseAdmin admin = new HBaseAdmin(config);
    if (!allowOnlineTableSchemaUpdate()) {
        admin.disableTable(tableName);
        admin.modifyTable(tableName, newDesc);
        admin.enableTable(tableName);
    } else {
        admin.modifyTable(tableName, newDesc);
        pollForUpdatedTableDescriptor(admin, newDesc, tableName);
    }
}
#location 11
#vulnerability type RESOURCE_LEAK
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code
@Test
public void testSystemCatalogWALEntryFilter() throws Exception {
    //now create WAL.Entry objects that refer to cells in those view rows in System.Catalog
    Get tenantViewGet = getTenantViewGet(catalogTable, TENANT_BYTES, TENANT_VIEW_NAME);
    Get nonTenantViewGet = getTenantViewGet(catalogTable, DEFAULT_TENANT_BYTES, NONTENANT_VIEW_NAME);
    Get tenantLinkGet = getParentChildLinkGet(catalogTable, TENANT_BYTES, TENANT_VIEW_NAME);
    Get nonTenantLinkGet = getParentChildLinkGet(catalogTable, DEFAULT_TENANT_BYTES, NONTENANT_VIEW_NAME);
    WAL.Entry nonTenantViewEntry = getEntry(systemCatalogTableName, nonTenantViewGet);
    WAL.Entry tenantViewEntry = getEntry(systemCatalogTableName, tenantViewGet);
    WAL.Entry nonTenantLinkEntry = getEntry(systemCatalogTableName, nonTenantLinkGet);
    WAL.Entry tenantLinkEntry = getEntry(systemCatalogTableName, tenantLinkGet);
    //verify that the tenant view WAL.Entry passes the filter and the non-tenant view does not
    SystemCatalogWALEntryFilter filter = new SystemCatalogWALEntryFilter();
    Assert.assertNull(filter.filter(nonTenantViewEntry));
    WAL.Entry filteredTenantEntry = filter.filter(tenantViewEntry);
    Assert.assertNotNull("Tenant view was filtered when it shouldn't be!", filteredTenantEntry);
    Assert.assertEquals(tenantViewEntry.getEdit().size(), filter.filter(tenantViewEntry).getEdit().size());
    //now check that a WAL.Entry with cells from both a tenant and a non-tenant
    //catalog row only allows the tenant cells through
    WALEdit comboEdit = new WALEdit();
    comboEdit.getCells().addAll(nonTenantViewEntry.getEdit().getCells());
    comboEdit.getCells().addAll(tenantViewEntry.getEdit().getCells());
    WAL.Entry comboEntry = new WAL.Entry(walKey, comboEdit);
    Assert.assertEquals(tenantViewEntry.getEdit().size() + nonTenantViewEntry.getEdit().size(), comboEntry.getEdit().size());
    Assert.assertEquals(tenantViewEntry.getEdit().size(), filter.filter(comboEntry).getEdit().size());
    //now check that the parent-child links (which have the tenant_id of the view's parent,
    // but are a part of the view's metadata) are migrated in the tenant case
    // but not the non-tenant. The view's tenant_id is in the System.Catalog.COLUMN_NAME field
    Assert.assertNull("Non-tenant parent-child link was not filtered " + "when it should be!",
        filter.filter(nonTenantLinkEntry));
    Assert.assertNotNull("Tenant parent-child link was filtered when it should not be!",
        filter.filter(tenantLinkEntry));
    Assert.assertEquals(tenantLinkEntry.getEdit().size(), filter.filter(tenantLinkEntry).getEdit().size());
    //add the parent-child link to the tenant view WAL entry,
    //since they'll usually be together and they both need to
    //be replicated
    tenantViewEntry.getEdit().getCells().addAll(tenantLinkEntry.getEdit().getCells());
    Assert.assertEquals(tenantViewEntry.getEdit().size(), tenantViewEntry.getEdit().size());
}
#vulnerable code
@Test
public void testSystemCatalogWALEntryFilter() throws Exception {
    //now create WAL.Entry objects that refer to cells in those view rows in System.Catalog
    Get tenantGet = getGet(catalogTable, TENANT_BYTES, TENANT_VIEW_NAME);
    Get nonTenantGet = getGet(catalogTable, DEFAULT_TENANT_BYTES, NONTENANT_VIEW_NAME);
    WAL.Entry nonTenantEntry = getEntry(systemCatalogTableName, nonTenantGet);
    WAL.Entry tenantEntry = getEntry(systemCatalogTableName, tenantGet);
    //verify that the tenant view WAL.Entry passes the filter and the non-tenant view does not
    SystemCatalogWALEntryFilter filter = new SystemCatalogWALEntryFilter();
    Assert.assertNull(filter.filter(nonTenantEntry));
    WAL.Entry filteredTenantEntry = filter.filter(tenantEntry);
    Assert.assertNotNull("Tenant view was filtered when it shouldn't be!", filteredTenantEntry);
    Assert.assertEquals(tenantEntry.getEdit().size(), filter.filter(tenantEntry).getEdit().size());
    //now check that a WAL.Entry with cells from both a tenant and a non-tenant
    //catalog row only allow the tenant cells through
    WALEdit comboEdit = new WALEdit();
    comboEdit.getCells().addAll(nonTenantEntry.getEdit().getCells());
    comboEdit.getCells().addAll(tenantEntry.getEdit().getCells());
    WAL.Entry comboEntry = new WAL.Entry(walKey, comboEdit);
    Assert.assertEquals(tenantEntry.getEdit().size() + nonTenantEntry.getEdit().size(), comboEntry.getEdit().size());
    Assert.assertEquals(tenantEntry.getEdit().size(), filter.filter(comboEntry).getEdit().size());
}
#location 18
#vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code
@Test
public void testSelectUpsertWithNewClient() throws Exception {
    // Insert data with old client and read with new client
    executeQueryWithClientVersion(compatibleClientVersion, CREATE_ADD);
    executeQueriesWithCurrentVersion(QUERY);
    assertExpectedOutput(CREATE_ADD, QUERY);
    // Insert more data with new client and read with old client
    executeQueriesWithCurrentVersion(ADD_DATA);
    executeQueryWithClientVersion(compatibleClientVersion, QUERY_MORE);
    assertExpectedOutput(ADD_DATA, QUERY_MORE);
}
#vulnerable code
@Test
public void testSelectUpsertWithNewClient() throws Exception {
    checkForPreConditions();
    // Insert data with old client and read with new client
    executeQueryWithClientVersion(compatibleClientVersion, CREATE_ADD);
    executeQueriesWithCurrentVersion(QUERY);
    assertTrue(compareOutput(CREATE_ADD, QUERY));
    // Insert more data with new client and read with old client
    executeQueriesWithCurrentVersion(ADD_DATA);
    executeQueryWithClientVersion(compatibleClientVersion, QUERY_MORE);
    assertTrue(compareOutput(ADD_DATA, QUERY_MORE));
}
#location 3
#vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code
@Test
public void testUpsertSelectSameBatchConcurrently() throws Exception {
    try (Connection conn = driver.connect(url, props)) {
        int numUpsertSelectRunners = 5;
        ExecutorService exec = Executors.newFixedThreadPool(numUpsertSelectRunners);
        CompletionService<Boolean> completionService = new ExecutorCompletionService<Boolean>(exec);
        List<Future<Boolean>> futures = Lists.newArrayListWithExpectedSize(numUpsertSelectRunners);
        // run one UPSERT SELECT for 100 rows (that locks the rows for a long time)
        futures.add(completionService.submit(new UpsertSelectRunner(dataTable, 0, 105, 1)));
        // run four UPSERT SELECTS for 5 rows (that overlap with slow running UPSERT SELECT)
        for (int i = 0; i < 100; i += 25) {
            futures.add(completionService.submit(new UpsertSelectRunner(dataTable, i, i+25, 5)));
        }
        int received = 0;
        while (received < futures.size()) {
            Future<Boolean> resultFuture = completionService.take();
            Boolean result = resultFuture.get();
            received++;
            assertTrue(result);
        }
        exec.shutdownNow();
    }
}
#vulnerable code
@Test
public void testUpsertSelectSameBatchConcurrently() throws Exception {
    final String dataTable = generateUniqueName();
    final String index = "IDX_" + dataTable;
    // create the table and ensure its empty
    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    Connection conn = driver.connect(url, props);
    conn.createStatement()
            .execute("CREATE TABLE " + dataTable + " (k INTEGER NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR)");
    // create the index and ensure its empty as well
    conn.createStatement().execute("CREATE INDEX " + index + " ON " + dataTable + " (v1)");
    conn = DriverManager.getConnection(getUrl(), props);
    PreparedStatement stmt = conn.prepareStatement("UPSERT INTO " + dataTable + " VALUES(?,?,?)");
    conn.setAutoCommit(false);
    for (int i = 0; i < 100; i++) {
        stmt.setInt(1, i);
        stmt.setString(2, "v1" + i);
        stmt.setString(3, "v2" + i);
        stmt.execute();
    }
    conn.commit();
    int numUpsertSelectRunners = 5;
    ExecutorService exec = Executors.newFixedThreadPool(numUpsertSelectRunners);
    CompletionService<Boolean> completionService = new ExecutorCompletionService<Boolean>(exec);
    List<Future<Boolean>> futures = Lists.newArrayListWithExpectedSize(numUpsertSelectRunners);
    // run one UPSERT SELECT for 100 rows (that locks the rows for a long time)
    futures.add(completionService.submit(new UpsertSelectRunner(dataTable, 0, 105, 1)));
    // run four UPSERT SELECTS for 5 rows (that overlap with slow running UPSERT SELECT)
    for (int i = 0; i < 100; i += 25) {
        futures.add(completionService.submit(new UpsertSelectRunner(dataTable, i, i+25, 5)));
    }
    int received = 0;
    while (received < futures.size()) {
        Future<Boolean> resultFuture = completionService.take();
        Boolean result = resultFuture.get();
        received++;
        assertTrue(result);
    }
    exec.shutdownNow();
    conn.close();
}
#location 8
#vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code
protected static void setupTxManager() throws SQLException, IOException {
    TransactionFactory.getTransactionProvider().getTransactionContext().setupTxManager(config, getUrl());
}
#vulnerable code
protected static void setupTxManager() throws SQLException, IOException {
    TransactionFactory.getTransactionFactory().getTransactionContext().setupTxManager(config, getUrl());
}
#location 2
#vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code
@Test
public void testUpsertDeleteWithNewClient() throws Exception {
    // Insert data with old client and read with new client
    executeQueriesWithCurrentVersion(CREATE_ADD);
    executeQueryWithClientVersion(compatibleClientVersion, QUERY);
    assertExpectedOutput(CREATE_ADD, QUERY);
    // Deletes with the new client
    executeQueriesWithCurrentVersion(ADD_DELETE);
    executeQueriesWithCurrentVersion(QUERY_ADD_DELETE);
    assertExpectedOutput(ADD_DELETE, QUERY_ADD_DELETE);
}
#vulnerable code
@Test
public void testUpsertDeleteWithNewClient() throws Exception {
    checkForPreConditions();
    // Insert data with old client and read with new client
    executeQueriesWithCurrentVersion(CREATE_ADD);
    executeQueryWithClientVersion(compatibleClientVersion, QUERY);
    assertTrue(compareOutput(CREATE_ADD, QUERY));
    // Deletes with the new client
    executeQueriesWithCurrentVersion(ADD_DELETE);
    executeQueriesWithCurrentVersion(QUERY_ADD_DELETE);
    assertTrue(compareOutput(ADD_DELETE, QUERY_ADD_DELETE));
}
#location 3
#vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code
@Override
public Expression visitLeave(ArrayConstructorNode node, List<Expression> children) throws SQLException {
    boolean isChildTypeUnknown = false;
    Expression arrayElemChild = null;
    PDataType arrayElemDataType = children.get(0).getDataType();
    for (int i = 0; i < children.size(); i++) {
        Expression child = children.get(i);
        PDataType childType = child.getDataType();
        if (childType == null) {
            isChildTypeUnknown = true;
        } else if (arrayElemDataType == null) {
            arrayElemDataType = childType;
            isChildTypeUnknown = true;
            arrayElemChild = child;
        } else if (arrayElemDataType == childType || childType.isCoercibleTo(arrayElemDataType)) {
            continue;
        } else if (arrayElemDataType.isCoercibleTo(childType)) {
            arrayElemChild = child;
            arrayElemDataType = childType;
        } else {
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_CONVERT_TYPE)
                    .setMessage("Case expressions must have common type: " + arrayElemDataType
                            + " cannot be coerced to " + childType).build().buildException();
        }
    }
    // If we found an "unknown" child type and the return type is a number
    // make the return type be the most general number type of DECIMAL.
    if (isChildTypeUnknown && arrayElemDataType != null && arrayElemDataType.isCoercibleTo(PDataType.DECIMAL)) {
        arrayElemDataType = PDataType.DECIMAL;
    }
    final PDataType theArrayElemDataType = arrayElemDataType;
    for (int i = 0; i < node.getChildren().size(); i++) {
        ParseNode childNode = node.getChildren().get(i);
        if (childNode instanceof BindParseNode) {
            context.getBindManager().addParamMetaData((BindParseNode)childNode,
                    arrayElemDataType == arrayElemChild.getDataType() ? arrayElemChild :
                        new DelegateDatum(arrayElemChild) {
                            @Override
                            public PDataType getDataType() {
                                return theArrayElemDataType;
                            }
                        });
        }
    }
    ImmutableBytesWritable ptr = context.getTempPtr();
    Object[] elements = new Object[children.size()];
    ArrayConstructorExpression arrayExpression = new ArrayConstructorExpression(children, arrayElemDataType);
    if (ExpressionUtil.isConstant(arrayExpression)) {
        for (int i = 0; i < children.size(); i++) {
            Expression child = children.get(i);
            child.evaluate(null, ptr);
            Object value = arrayElemDataType.toObject(ptr, child.getDataType(), child.getSortOrder());
            elements[i] = LiteralExpression.newConstant(value, child.getDataType(), child.isDeterministic()).getValue();
        }
        Object value = PArrayDataType.instantiatePhoenixArray(arrayElemDataType, elements);
        return LiteralExpression.newConstant(value,
                PDataType.fromTypeId(arrayElemDataType.getSqlType() + PDataType.ARRAY_TYPE_BASE), true);
    }
    return wrapGroupByExpression(arrayExpression);
}
#vulnerable code
@Override
public Expression visitLeave(ArrayConstructorNode node, List<Expression> children) throws SQLException {
    boolean isChildTypeUnknown = false;
    Expression arrayElemChild = null;
    PDataType arrayElemDataType = children.get(0).getDataType();
    for (int i = 0; i < children.size(); i++) {
        Expression child = children.get(i);
        PDataType childType = child.getDataType();
        if (childType == null) {
            isChildTypeUnknown = true;
        } else if (arrayElemDataType == null) {
            arrayElemDataType = childType;
            isChildTypeUnknown = true;
            arrayElemChild = child;
        } else if (arrayElemDataType == childType || childType.isCoercibleTo(arrayElemDataType)) {
            continue;
        } else if (arrayElemDataType.isCoercibleTo(childType)) {
            arrayElemChild = child;
            arrayElemDataType = childType;
        } else {
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_CONVERT_TYPE)
                    .setMessage("Case expressions must have common type: " + arrayElemDataType
                            + " cannot be coerced to " + childType).build().buildException();
        }
    }
    // If we found an "unknown" child type and the return type is a number
    // make the return type be the most general number type of DECIMAL.
    if (isChildTypeUnknown && arrayElemDataType != null && arrayElemDataType.isCoercibleTo(PDataType.DECIMAL)) {
        arrayElemDataType = PDataType.DECIMAL;
    }
    final PDataType theArrayElemDataType = arrayElemDataType;
    for (int i = 0; i < node.getChildren().size(); i++) {
        ParseNode childNode = node.getChildren().get(i);
        if (childNode instanceof BindParseNode) {
            context.getBindManager().addParamMetaData((BindParseNode)childNode,
                    arrayElemDataType == arrayElemChild.getDataType() ? arrayElemChild :
                        new DelegateDatum(arrayElemChild) {
                            @Override
                            public PDataType getDataType() {
                                return theArrayElemDataType;
                            }
                        });
        }
    }
    ImmutableBytesWritable ptr = context.getTempPtr();
    Object[] elements = new Object[children.size()];
    if (node.isStateless()) {
        boolean isDeterministic = true;
        for (int i = 0; i < children.size(); i++) {
            Expression child = children.get(i);
            isDeterministic &= child.isDeterministic();
            child.evaluate(null, ptr);
            Object value = arrayElemDataType.toObject(ptr, child.getDataType(), child.getSortOrder());
            elements[i] = LiteralExpression.newConstant(value, child.getDataType(), child.isDeterministic()).getValue();
        }
        Object value = PArrayDataType.instantiatePhoenixArray(arrayElemDataType, elements);
        return LiteralExpression.newConstant(value,
                PDataType.fromTypeId(arrayElemDataType.getSqlType() + PDataType.ARRAY_TYPE_BASE), isDeterministic);
    }
    ArrayConstructorExpression arrayExpression = new ArrayConstructorExpression(children, arrayElemDataType);
    return wrapGroupByExpression(arrayExpression);
}
#location 57
#vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code
@Test
public void testIndexRebuildTask() throws Throwable {
    String baseTable = generateUniqueName();
    String viewName = generateUniqueName();
    Connection conn = null;
    Connection tenantConn = null;
    try {
        conn = DriverManager.getConnection(getUrl());
        conn.setAutoCommit(false);
        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
        props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, TENANT1);
        tenantConn = DriverManager.getConnection(getUrl(), props);
        String ddlFormat = "CREATE TABLE IF NOT EXISTS " + baseTable + " ("
                + " %s PK2 VARCHAR NOT NULL, V1 VARCHAR, V2 VARCHAR "
                + " CONSTRAINT NAME_PK PRIMARY KEY (%s PK2)" + " ) %s";
        conn.createStatement().execute(generateDDL(ddlFormat));
        conn.commit();
        // Create a view
        String viewDDL = "CREATE VIEW " + viewName + " AS SELECT * FROM " + baseTable;
        tenantConn.createStatement().execute(viewDDL);
        // Create index
        String indexName = generateUniqueName();
        String idxSDDL = String.format("CREATE INDEX %s ON %s (V1)", indexName, viewName);
        tenantConn.createStatement().execute(idxSDDL);
        // Insert rows
        int numOfValues = 1000;
        for (int i = 0; i < numOfValues; i++) {
            tenantConn.createStatement().execute(
                String.format("UPSERT INTO %s VALUES('%s', '%s', '%s')", viewName, String.valueOf(i), "y", "z"));
        }
        tenantConn.commit();
        waitForIndexRebuild(conn, indexName, PIndexState.ACTIVE);
        String viewIndexTableName = MetaDataUtil.getViewIndexPhysicalName(baseTable);
        ConnectionQueryServices queryServices = conn.unwrap(PhoenixConnection.class).getQueryServices();
        Table indexHTable = queryServices.getTable(Bytes.toBytes(viewIndexTableName));
        int count = getUtility().countRows(indexHTable);
        assertEquals(numOfValues, count);
        // Alter to Unusable makes the index status inactive.
        // Altering to DISABLE instead fails in the Index tool while setting state to active, due to an invalid transition.
        tenantConn.createStatement().execute(
            String.format("ALTER INDEX %s ON %s UNUSABLE", indexName, viewName));
        tenantConn.commit();
        // Remove index contents and try again
        Admin admin = queryServices.getAdmin();
        TableName tableName = TableName.valueOf(viewIndexTableName);
        admin.disableTable(tableName);
        admin.truncateTable(tableName, false);
        count = getUtility().countRows(indexHTable);
        assertEquals(0, count);
        String data = "{IndexName:" + indexName + ", DisableBefore: true}";
        // Run IndexRebuildTask
        TaskRegionObserver.SelfHealingTask task = new TaskRegionObserver.SelfHealingTask(
                TaskRegionEnvironment, QueryServicesOptions.DEFAULT_TASK_HANDLING_MAX_INTERVAL_MS);
        Timestamp startTs = new Timestamp(EnvironmentEdgeManager.currentTimeMillis());
        Task.addTask(conn.unwrap(PhoenixConnection.class), PTable.TaskType.INDEX_REBUILD,
                TENANT1, null, viewName,
                PTable.TaskStatus.CREATED.toString(), data, null, startTs, null, true);
        task.run();
        // Check task status and other column values.
        waitForTaskState(conn, PTable.TaskType.INDEX_REBUILD, viewName, PTable.TaskStatus.COMPLETED);
        // See that index is rebuilt and confirm index has rows
        count = getUtility().countRows(indexHTable);
        assertEquals(numOfValues, count);
    } finally {
        if (conn != null) {
            conn.createStatement().execute("DELETE " + " FROM " + PhoenixDatabaseMetaData.SYSTEM_TASK_NAME
                    + " WHERE TABLE_NAME ='" + viewName + "'");
            conn.commit();
            conn.close();
        }
        if (tenantConn != null) {
            tenantConn.close();
        }
    }
}
#vulnerable code
@Test
public void testIndexRebuildTask() throws Throwable {
    String baseTable = generateUniqueName();
    Connection conn = null;
    Connection viewConn = null;
    try {
        conn = DriverManager.getConnection(getUrl());
        conn.setAutoCommit(false);
        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
        props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, TENANT1);
        viewConn = DriverManager.getConnection(getUrl(), props);
        String ddlFormat = "CREATE TABLE IF NOT EXISTS " + baseTable + " ("
                + " %s PK2 VARCHAR NOT NULL, V1 VARCHAR, V2 VARCHAR "
                + " CONSTRAINT NAME_PK PRIMARY KEY (%s PK2)" + " ) %s";
        conn.createStatement().execute(generateDDL(ddlFormat));
        conn.commit();
        // Create a view
        String viewName = generateUniqueName();
        String viewDDL = "CREATE VIEW " + viewName + " AS SELECT * FROM " + baseTable;
        viewConn.createStatement().execute(viewDDL);
        // Create index
        String indexName = generateUniqueName();
        String idxSDDL = String.format("CREATE INDEX %s ON %s (V1)", indexName, viewName);
        viewConn.createStatement().execute(idxSDDL);
        // Insert rows
        int numOfValues = 1000;
        for (int i = 0; i < numOfValues; i++) {
            viewConn.createStatement().execute(
                    String.format("UPSERT INTO %s VALUES('%s', '%s', '%s')", viewName,
                            String.valueOf(i), "y", "z"));
        }
        viewConn.commit();
        String data = "{IndexName:" + indexName + "}";
        // Run IndexRebuildTask
        TaskRegionObserver.SelfHealingTask task = new TaskRegionObserver.SelfHealingTask(
                TaskRegionEnvironment, QueryServicesOptions.DEFAULT_TASK_HANDLING_MAX_INTERVAL_MS);
        Timestamp startTs = new Timestamp(EnvironmentEdgeManager.currentTimeMillis());
        // Add a task to System.Task to build indexes
        Task.addTask(conn.unwrap(PhoenixConnection.class), PTable.TaskType.INDEX_REBUILD,
                TENANT1, null, viewName,
                PTable.TaskStatus.CREATED.toString(), data, null, startTs, null, true);
        task.run();
        String viewIndexTableName = MetaDataUtil.getViewIndexPhysicalName(baseTable);
        ConnectionQueryServices queryServices = conn.unwrap(PhoenixConnection.class).getQueryServices();
        int count = getUtility().countRows(queryServices.getTable(Bytes.toBytes(viewIndexTableName)));
        assertTrue(count == numOfValues);
        // Remove index contents and try again
        Admin admin = queryServices.getAdmin();
        TableName tableName = TableName.valueOf(viewIndexTableName);
        admin.disableTable(tableName);
        admin.truncateTable(tableName, false);
        data = "{IndexName:" + indexName + ", DisableBefore:true}";
        // Add a new task (update status to created) to System.Task to rebuild indexes
        Task.addTask(conn.unwrap(PhoenixConnection.class), PTable.TaskType.INDEX_REBUILD,
                TENANT1, null, viewName,
                PTable.TaskStatus.CREATED.toString(), data, null, startTs, null, true);
        task.run();
        Table systemHTable = queryServices.getTable(
                Bytes.toBytes("SYSTEM." + PhoenixDatabaseMetaData.SYSTEM_TASK_TABLE));
        count = getUtility().countRows(systemHTable);
        assertEquals(1, count);
        // Check task status and other column values.
        waitForTaskState(conn, PTable.TaskType.INDEX_REBUILD, PTable.TaskStatus.COMPLETED);
        // See that index is rebuilt and confirm index has rows
        Table htable = queryServices.getTable(Bytes.toBytes(viewIndexTableName));
        count = getUtility().countRows(htable);
        assertEquals(numOfValues, count);
    } finally {
        conn.createStatement().execute("DELETE " + " FROM " + PhoenixDatabaseMetaData.SYSTEM_TASK_NAME);
        conn.commit();
        if (conn != null) {
            conn.close();
        }
        if (viewConn != null) {
            viewConn.close();
        }
    }
}
#location 86
#vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
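The flagged line sits in the finally block, which dereferences conn unconditionally even though getConnection may have thrown before conn was ever assigned; the fix moves the cleanup inside a null check. A reduced sketch of that pattern, assuming a plain JDBC URL:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;

public class SafeCleanupSketch {
    static void run(String url) throws SQLException {
        Connection conn = null;
        try {
            conn = DriverManager.getConnection(url);
            // ... work with conn ...
        } finally {
            if (conn != null) { // guard: conn stays null if getConnection threw
                conn.close();
            }
        }
    }
}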
#fixed code
@Override
public void modifyTable(byte[] tableName, HTableDescriptor newDesc)
        throws IOException, InterruptedException, TimeoutException {
    try (HBaseAdmin admin = new HBaseAdmin(config)) {
        if (!allowOnlineTableSchemaUpdate()) {
            admin.disableTable(tableName);
            admin.modifyTable(tableName, newDesc);
            admin.enableTable(tableName);
        } else {
            admin.modifyTable(tableName, newDesc);
            pollForUpdatedTableDescriptor(admin, newDesc, tableName);
        }
    }
}
#vulnerable code
@Override
public void modifyTable(byte[] tableName, HTableDescriptor newDesc)
        throws IOException, InterruptedException, TimeoutException {
    HBaseAdmin admin = new HBaseAdmin(config);
    if (!allowOnlineTableSchemaUpdate()) {
        admin.disableTable(tableName);
        admin.modifyTable(tableName, newDesc);
        admin.enableTable(tableName);
    } else {
        admin.modifyTable(tableName, newDesc);
        pollForUpdatedTableDescriptor(admin, newDesc, tableName);
    }
}
#location 8
#vulnerability type RESOURCE_LEAK
Below is the vulnerable code, please generate the patch based on the following information.
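The fix wraps the admin handle in try-with-resources so it is closed on every path, including when a call on it throws. A self-contained sketch of the same shape with a stand-in AutoCloseable (HBaseAdmin itself implements Closeable):

public class TryWithResourcesSketch {
    static class Admin implements AutoCloseable {
        void modify() { /* may throw at runtime */ }
        @Override public void close() { System.out.println("closed"); }
    }

    static void modifyTable() {
        // Leaky shape: Admin admin = new Admin(); admin.modify(); // never closed on throw
        try (Admin admin = new Admin()) { // closed automatically, even on exception
            admin.modify();
        }
    }

    public static void main(String[] args) { modifyTable(); }
}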
#fixed code
@Override
protected void reduce(TableRowkeyPair key, Iterable<ImmutableBytesWritable> values,
        Reducer<TableRowkeyPair, ImmutableBytesWritable, TableRowkeyPair, KeyValue>.Context context)
        throws IOException, InterruptedException {
    TreeSet<KeyValue> map = new TreeSet<KeyValue>(KeyValue.COMPARATOR);
    ImmutableBytesWritable rowKey = key.getRowkey();
    for (ImmutableBytesWritable aggregatedArray : values) {
        DataInputStream input = new DataInputStream(new ByteArrayInputStream(aggregatedArray.get()));
        while (input.available() != 0) {
            byte type = input.readByte();
            int index = WritableUtils.readVInt(input);
            ImmutableBytesWritable family;
            ImmutableBytesWritable name;
            ImmutableBytesWritable value = QueryConstants.EMPTY_COLUMN_VALUE_BYTES_PTR;
            if (index == -1) {
                family = emptyFamilyName.get(key.getTableName());
                name = QueryConstants.EMPTY_COLUMN_BYTES_PTR;
            } else {
                Pair<byte[], byte[]> pair = columnIndexes.get(index);
                if (pair.getFirst() != null) {
                    family = new ImmutableBytesWritable(pair.getFirst());
                } else {
                    family = emptyFamilyName.get(key.getTableName());
                }
                name = new ImmutableBytesWritable(pair.getSecond());
            }
            int len = WritableUtils.readVInt(input);
            if (len > 0) {
                byte[] array = new byte[len];
                input.read(array);
                value = new ImmutableBytesWritable(array);
            }
            KeyValue kv;
            KeyValue.Type kvType = KeyValue.Type.codeToType(type);
            switch (kvType) {
            case Put: // not null value
                kv = builder.buildPut(key.getRowkey(), family, name, value);
                break;
            case DeleteColumn: // null value
                kv = builder.buildDeleteColumns(key.getRowkey(), family, name);
                break;
            default:
                throw new IOException("Unsupported KeyValue type " + kvType);
            }
            map.add(kv);
        }
        Closeables.closeQuietly(input);
    }
    context.setStatus("Read " + map.getClass());
    int index = 0;
    for (KeyValue kv : map) {
        context.write(key, kv);
        if (++index % 100 == 0) context.setStatus("Wrote " + index);
    }
}
#vulnerable code
@Override
protected void reduce(TableRowkeyPair key, Iterable<ImmutableBytesWritable> values,
        Reducer<TableRowkeyPair, ImmutableBytesWritable, TableRowkeyPair, KeyValue>.Context context)
        throws IOException, InterruptedException {
    TreeSet<KeyValue> map = new TreeSet<KeyValue>(KeyValue.COMPARATOR);
    int tableIndex = tableNames.indexOf(key.getTableName());
    List<Pair<byte[], byte[]>> columns = columnIndexes.get(tableIndex);
    for (ImmutableBytesWritable aggregatedArray : values) {
        DataInputStream input = new DataInputStream(new ByteArrayInputStream(aggregatedArray.get()));
        while (input.available() != 0) {
            int index = WritableUtils.readVInt(input);
            Pair<byte[], byte[]> pair = columns.get(index);
            byte type = input.readByte();
            ImmutableBytesWritable value = null;
            int len = WritableUtils.readVInt(input);
            if (len > 0) {
                byte[] array = new byte[len];
                input.read(array);
                value = new ImmutableBytesWritable(array);
            }
            KeyValue kv;
            KeyValue.Type kvType = KeyValue.Type.codeToType(type);
            switch (kvType) {
            case Put: // not null value
                kv = builder.buildPut(key.getRowkey(),
                        new ImmutableBytesWritable(pair.getFirst()),
                        new ImmutableBytesWritable(pair.getSecond()), value);
                break;
            case DeleteColumn: // null value
                kv = builder.buildDeleteColumns(key.getRowkey(),
                        new ImmutableBytesWritable(pair.getFirst()),
                        new ImmutableBytesWritable(pair.getSecond()));
                break;
            default:
                throw new IOException("Unsupported KeyValue type " + kvType);
            }
            map.add(kv);
        }
        KeyValue empty = builder.buildPut(key.getRowkey(), emptyFamilyName.get(tableIndex),
                QueryConstants.EMPTY_COLUMN_BYTES_PTR, ByteUtil.EMPTY_BYTE_ARRAY_PTR);
        map.add(empty);
        Closeables.closeQuietly(input);
    }
    context.setStatus("Read " + map.getClass());
    int index = 0;
    for (KeyValue kv : map) {
        context.write(key, kv);
        if (++index % 100 == 0) context.setStatus("Wrote " + index);
    }
}
#location 20
#vulnerability type RESOURCE_LEAK
Below is the vulnerable code, please generate the patch based on the following information.
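In both versions the stream is only closed by a trailing closeQuietly call, which is skipped if a read throws mid-loop and the stream then leaks. A reduced sketch of the leak-free shape using try-with-resources:

import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;

public class StreamCloseSketch {
    static int sum(byte[] bytes) throws IOException {
        int total = 0;
        // try-with-resources closes the stream even if readInt throws mid-loop;
        // a trailing closeQuietly call is skipped on that path and leaks.
        try (DataInputStream input = new DataInputStream(new ByteArrayInputStream(bytes))) {
            while (input.available() != 0) {
                total += input.readInt();
            }
        }
        return total;
    }
}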
#fixed code
private static Object[] coerceToEqualLength(PDataType baseType, Object[] elements) {
    if (elements == null || elements.length == 0) {
        return elements;
    }
    int maxLength = 0;
    boolean resizeElements = false;
    for (int i = 0; i < elements.length; i++) {
        Integer length = baseType.getMaxLength(elements[i]);
        if (length != null) {
            if (maxLength == 0) {
                maxLength = length;
                continue;
            }
            if (length > maxLength) {
                maxLength = length;
                resizeElements = true;
            } else if (length < maxLength) {
                resizeElements = true;
            }
        } else {
            resizeElements = true;
        }
    }
    if (!resizeElements) {
        return elements;
    }
    return coerceToNewLength(baseType, elements, maxLength);
}
#vulnerable code
private static Object[] coerceToEqualLength(PDataType baseType, Object[] elements) {
    if (elements == null || elements.length == 0) {
        return elements;
    }
    Object element = elements[0];
    int maxLength = baseType.getMaxLength(element);
    boolean resizeElements = false;
    for (int i = 1; i < elements.length; i++) {
        int length = baseType.getMaxLength(elements[i]);
        if (length > maxLength) {
            maxLength = length;
            resizeElements = true;
        } else if (length < maxLength) {
            resizeElements = true;
        }
    }
    if (!resizeElements) {
        return elements;
    }
    return coerceToNewLength(baseType, elements, maxLength);
}
#location 6
#vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
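As the fixed version's null check shows, getMaxLength returns an Integer that can be null; the vulnerable version assigns it straight into an int, which throws on auto-unboxing. A standalone sketch of that pitfall and the guard the fix adopts:

public class UnboxingSketch {
    static Integer maxLength(String s) { // stand-in for a nullable length accessor
        return s == null ? null : s.length();
    }

    public static void main(String[] args) {
        // Vulnerable shape: int len = maxLength(null); // NPE on auto-unboxing
        Integer len = maxLength(null);
        if (len != null) { // guard before any int comparison
            System.out.println(len > 0);
        } else {
            System.out.println("no fixed length");
        }
    }
}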
#fixed code
private DeleteType getDeleteTypeOrNull(Collection<? extends Cell> pendingUpdates, int nCFs) {
    int nDeleteCF = 0;
    int nDeleteVersionCF = 0;
    for (Cell kv : pendingUpdates) {
        if (kv.getTypeByte() == KeyValue.Type.DeleteFamilyVersion.getCode()) {
            nDeleteVersionCF++;
        } else if (kv.getTypeByte() == KeyValue.Type.DeleteFamily.getCode()
                // Since we don't include the index rows in the change set for txn tables, we need to detect row deletes that have transformed by TransactionProcessor
                || (CellUtil.matchingQualifier(kv, TransactionFactory.getTransactionProvider().getTransactionContext().getFamilyDeleteMarker())
                        && CellUtil.matchingValue(kv, HConstants.EMPTY_BYTE_ARRAY))) {
            nDeleteCF++;
        }
    }
    // This is what a delete looks like on the server side for mutable indexing...
    // Should all be one or the other for DeleteFamily versus DeleteFamilyVersion, but just in case not
    DeleteType deleteType = null;
    if (nDeleteVersionCF > 0 && nDeleteVersionCF >= nCFs) {
        deleteType = DeleteType.SINGLE_VERSION;
    } else {
        int nDelete = nDeleteCF + nDeleteVersionCF;
        if (nDelete > 0 && nDelete >= nCFs) {
            deleteType = DeleteType.ALL_VERSIONS;
        }
    }
    return deleteType;
}
#vulnerable code
private DeleteType getDeleteTypeOrNull(Collection<? extends Cell> pendingUpdates, int nCFs) {
    int nDeleteCF = 0;
    int nDeleteVersionCF = 0;
    for (Cell kv : pendingUpdates) {
        if (kv.getTypeByte() == KeyValue.Type.DeleteFamilyVersion.getCode()) {
            nDeleteVersionCF++;
        } else if (kv.getTypeByte() == KeyValue.Type.DeleteFamily.getCode()
                // Since we don't include the index rows in the change set for txn tables, we need to detect row deletes that have transformed by TransactionProcessor
                || (CellUtil.matchingQualifier(kv, TransactionFactory.getTransactionFactory().getTransactionContext().getFamilyDeleteMarker())
                        && CellUtil.matchingValue(kv, HConstants.EMPTY_BYTE_ARRAY))) {
            nDeleteCF++;
        }
    }
    // This is what a delete looks like on the server side for mutable indexing...
    // Should all be one or the other for DeleteFamily versus DeleteFamilyVersion, but just in case not
    DeleteType deleteType = null;
    if (nDeleteVersionCF > 0 && nDeleteVersionCF >= nCFs) {
        deleteType = DeleteType.SINGLE_VERSION;
    } else {
        int nDelete = nDeleteCF + nDeleteVersionCF;
        if (nDelete > 0 && nDelete >= nCFs) {
            deleteType = DeleteType.ALL_VERSIONS;
        }
    }
    return deleteType;
}
#location 10
#vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
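The one-token diff here swaps an accessor inside a long static call chain, and the report points at a possible null link in that chain. A generic, hypothetical sketch (the interface names below are illustrative, not Phoenix APIs) of splitting such a chain into checked steps so the null link fails visibly:

public class ChainGuardSketch {
    interface TxContext { byte[] familyDeleteMarker(); }
    interface TxProvider { TxContext context(); }

    // Splitting a long call chain into checked steps makes the null link
    // visible instead of failing deep inside a compound condition.
    static byte[] marker(TxProvider provider) {
        if (provider == null) throw new IllegalStateException("no tx provider");
        TxContext ctx = provider.context();
        if (ctx == null) throw new IllegalStateException("no tx context");
        return ctx.familyDeleteMarker();
    }
}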
#fixed code
@Test
public void testIndexQos() throws Exception {
    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    Connection conn = driver.connect(getUrl(), props);
    try {
        // create the table
        createTable(conn, dataTableFullName);
        // create the index
        createIndex(conn, indexName);
        ensureTablesOnDifferentRegionServers(dataTableFullName, indexTableFullName);
        upsertRow(conn, dataTableFullName);
        // run select query that should use the index
        String selectSql = "SELECT k, v2 from " + dataTableFullName + " WHERE v1=?";
        PreparedStatement stmt = conn.prepareStatement(selectSql);
        stmt.setString(1, "v1");
        // verify that the query does a range scan on the index table
        ResultSet rs = stmt.executeQuery("EXPLAIN " + selectSql);
        assertEquals("CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + indexTableFullName + " ['v1']",
                QueryUtil.getExplainPlan(rs));
        // verify that the correct results are returned
        rs = stmt.executeQuery();
        assertTrue(rs.next());
        assertEquals("k1", rs.getString(1));
        assertEquals("v2", rs.getString(2));
        assertFalse(rs.next());
        // drop index table
        conn.createStatement().execute("DROP INDEX " + indexName + " ON " + dataTableFullName);
        // create a data table with the same name as the index table
        createTable(conn, indexTableFullName);
        // upsert one row to the table (which has the same table name as the previous index table)
        upsertRow(conn, indexTableFullName);
        // run select query on the new table
        selectSql = "SELECT k, v2 from " + indexTableFullName + " WHERE v1=?";
        stmt = conn.prepareStatement(selectSql);
        stmt.setString(1, "v1");
        // verify that the correct results are returned
        rs = stmt.executeQuery();
        assertTrue(rs.next());
        assertEquals("k1", rs.getString(1));
        assertEquals("v2", rs.getString(2));
        assertFalse(rs.next());
        // verify that that index queue is used only once (for the first upsert)
        Mockito.verify(TestPhoenixIndexRpcSchedulerFactory.getIndexRpcExecutor()).dispatch(Mockito.any(CallRunner.class));
        TestPhoenixIndexRpcSchedulerFactory.reset();
        createIndex(conn, indexName + "_1");
        // verify that that index queue is used and only once (during Upsert Select on server to build the index)
        Mockito.verify(TestPhoenixIndexRpcSchedulerFactory.getIndexRpcExecutor()).dispatch(Mockito.any(CallRunner.class));
    } finally {
        conn.close();
    }
}
#vulnerable code
@Test
public void testIndexQos() throws Exception {
    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    Connection conn = driver.connect(getUrl(), props);
    try {
        // create the table
        conn.createStatement().execute(
                "CREATE TABLE " + dataTableFullName + " (k VARCHAR NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR)");
        // create the index
        conn.createStatement().execute(
                "CREATE INDEX " + indexName + " ON " + dataTableFullName + " (v1) INCLUDE (v2)");
        ensureTablesOnDifferentRegionServers(dataTableFullName, indexTableFullName);
        PreparedStatement stmt = conn.prepareStatement("UPSERT INTO " + dataTableFullName + " VALUES(?,?,?)");
        stmt.setString(1, "k1");
        stmt.setString(2, "v1");
        stmt.setString(3, "v2");
        stmt.execute();
        conn.commit();
        // run select query that should use the index
        String selectSql = "SELECT k, v2 from " + dataTableFullName + " WHERE v1=?";
        stmt = conn.prepareStatement(selectSql);
        stmt.setString(1, "v1");
        // verify that the query does a range scan on the index table
        ResultSet rs = stmt.executeQuery("EXPLAIN " + selectSql);
        assertEquals("CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + indexTableFullName + " ['v1']",
                QueryUtil.getExplainPlan(rs));
        // verify that the correct results are returned
        rs = stmt.executeQuery();
        assertTrue(rs.next());
        assertEquals("k1", rs.getString(1));
        assertEquals("v2", rs.getString(2));
        assertFalse(rs.next());
        // drop index table
        conn.createStatement().execute("DROP INDEX " + indexName + " ON " + dataTableFullName);
        // create a data table with the same name as the index table
        conn.createStatement().execute(
                "CREATE TABLE " + indexTableFullName + " (k VARCHAR NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR)");
        // upsert one row to the table (which has the same table name as the previous index table)
        stmt = conn.prepareStatement("UPSERT INTO " + indexTableFullName + " VALUES(?,?,?)");
        stmt.setString(1, "k1");
        stmt.setString(2, "v1");
        stmt.setString(3, "v2");
        stmt.execute();
        conn.commit();
        // run select query on the new table
        selectSql = "SELECT k, v2 from " + indexTableFullName + " WHERE v1=?";
        stmt = conn.prepareStatement(selectSql);
        stmt.setString(1, "v1");
        // verify that the correct results are returned
        rs = stmt.executeQuery();
        assertTrue(rs.next());
        assertEquals("k1", rs.getString(1));
        assertEquals("v2", rs.getString(2));
        assertFalse(rs.next());
        // verify that that index queue is used only once (for the first upsert)
        Mockito.verify(TestPhoenixIndexRpcSchedulerFactory.getIndexRpcExecutor()).dispatch(Mockito.any(CallRunner.class));
        TestPhoenixIndexRpcSchedulerFactory.reset();
        conn.createStatement().execute(
                "CREATE INDEX " + indexName + "_1 ON " + dataTableFullName + " (v1) INCLUDE (v2)");
        // verify that that index queue is used and only once (during Upsert Select on server to build the index)
        Mockito.verify(TestPhoenixIndexRpcSchedulerFactory.getIndexRpcExecutor()).dispatch(Mockito.any(CallRunner.class));
    } finally {
        conn.close();
    }
}
#location 7
#vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
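The flagged dereference is the first use of conn, which comes from driver.connect; JDBC specifies that Driver.connect returns null when the driver does not accept the URL, so the result needs a check before use. A minimal sketch:

import java.sql.Connection;
import java.sql.Driver;
import java.sql.SQLException;
import java.util.Objects;
import java.util.Properties;

public class ConnectNullSketch {
    // Driver.connect is specified to return null for a URL the driver does
    // not accept, so the caller must not dereference the result blindly.
    static Connection open(Driver driver, String url, Properties props) throws SQLException {
        Connection conn = driver.connect(url, props);
        return Objects.requireNonNull(conn, "driver did not accept URL: " + url);
    }
}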
#fixed code
@Override
protected RegionScanner doPostScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
        final Scan scan, final RegionScanner s) throws IOException {
    RegionCoprocessorEnvironment env = c.getEnvironment();
    Region region = env.getRegion();
    long ts = scan.getTimeRange().getMax();
    boolean localIndexScan = ScanUtil.isLocalIndex(scan);
    if (ScanUtil.isAnalyzeTable(scan)) {
        byte[] gp_width_bytes = scan.getAttribute(BaseScannerRegionObserver.GUIDEPOST_WIDTH_BYTES);
        byte[] gp_per_region_bytes = scan.getAttribute(BaseScannerRegionObserver.GUIDEPOST_PER_REGION);
        // Let this throw, as this scan is being done for the sole purpose of collecting stats
        StatisticsCollector statsCollector = StatisticsCollectorFactory.createStatisticsCollector(
                env, region.getRegionInfo().getTable().getNameAsString(), ts,
                gp_width_bytes, gp_per_region_bytes);
        return collectStats(s, statsCollector, region, scan, env.getConfiguration());
    }
    int offsetToBe = 0;
    if (localIndexScan) {
        /*
         * For local indexes, we need to set an offset on row key expressions to skip
         * the region start key.
         */
        offsetToBe = region.getRegionInfo().getStartKey().length != 0
                ? region.getRegionInfo().getStartKey().length
                : region.getRegionInfo().getEndKey().length;
        ScanUtil.setRowKeyOffset(scan, offsetToBe);
    }
    final int offset = offsetToBe;
    PTable projectedTable = null;
    PTable writeToTable = null;
    byte[][] values = null;
    byte[] descRowKeyTableBytes = scan.getAttribute(UPGRADE_DESC_ROW_KEY);
    boolean isDescRowKeyOrderUpgrade = descRowKeyTableBytes != null;
    if (isDescRowKeyOrderUpgrade) {
        logger.debug("Upgrading row key for " + region.getRegionInfo().getTable().getNameAsString());
        projectedTable = deserializeTable(descRowKeyTableBytes);
        try {
            writeToTable = PTableImpl.makePTable(projectedTable, true);
        } catch (SQLException e) {
            ServerUtil.throwIOException("Upgrade failed", e); // Impossible
        }
        values = new byte[projectedTable.getPKColumns().size()][];
    }
    byte[] localIndexBytes = scan.getAttribute(LOCAL_INDEX_BUILD);
    List<IndexMaintainer> indexMaintainers = localIndexBytes == null ? null
            : IndexMaintainer.deserialize(localIndexBytes);
    List<Mutation> indexMutations = localIndexBytes == null
            ? Collections.<Mutation>emptyList()
            : Lists.<Mutation>newArrayListWithExpectedSize(1024);
    RegionScanner theScanner = s;
    byte[] indexUUID = scan.getAttribute(PhoenixIndexCodec.INDEX_UUID);
    List<Expression> selectExpressions = null;
    byte[] upsertSelectTable = scan.getAttribute(BaseScannerRegionObserver.UPSERT_SELECT_TABLE);
    boolean isUpsert = false;
    boolean isDelete = false;
    byte[] deleteCQ = null;
    byte[] deleteCF = null;
    byte[] emptyCF = null;
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    if (upsertSelectTable != null) {
        isUpsert = true;
        projectedTable = deserializeTable(upsertSelectTable);
        selectExpressions = deserializeExpressions(scan.getAttribute(BaseScannerRegionObserver.UPSERT_SELECT_EXPRS));
        values = new byte[projectedTable.getPKColumns().size()][];
    } else {
        byte[] isDeleteAgg = scan.getAttribute(BaseScannerRegionObserver.DELETE_AGG);
        isDelete = isDeleteAgg != null && Bytes.compareTo(PDataType.TRUE_BYTES, isDeleteAgg) == 0;
        if (!isDelete) {
            deleteCF = scan.getAttribute(BaseScannerRegionObserver.DELETE_CF);
            deleteCQ = scan.getAttribute(BaseScannerRegionObserver.DELETE_CQ);
        }
        emptyCF = scan.getAttribute(BaseScannerRegionObserver.EMPTY_CF);
    }
    TupleProjector tupleProjector = null;
    byte[][] viewConstants = null;
    ColumnReference[] dataColumns = IndexUtil.deserializeDataTableColumnsToJoin(scan);
    final TupleProjector p = TupleProjector.deserializeProjectorFromScan(scan);
    final HashJoinInfo j = HashJoinInfo.deserializeHashJoinFromScan(scan);
    if ((localIndexScan && !isDelete && !isDescRowKeyOrderUpgrade) || (j == null && p != null)) {
        if (dataColumns != null) {
            tupleProjector = IndexUtil.getTupleProjector(scan, dataColumns);
            viewConstants = IndexUtil.deserializeViewConstantsFromScan(scan);
        }
        ImmutableBytesWritable tempPtr = new ImmutableBytesWritable();
        theScanner = getWrappedScanner(c, theScanner, offset, scan, dataColumns, tupleProjector,
                region, indexMaintainers == null ? null : indexMaintainers.get(0), viewConstants, p, tempPtr);
    }
    if (j != null) {
        theScanner = new HashJoinRegionScanner(theScanner, p, j, ScanUtil.getTenantId(scan), env);
    }
    int batchSize = 0;
    List<Mutation> mutations = Collections.emptyList();
    boolean needToWrite = false;
    Configuration conf = c.getEnvironment().getConfiguration();
    long flushSize = region.getTableDesc().getMemStoreFlushSize();
    if (flushSize <= 0) {
        flushSize = conf.getLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE,
                HTableDescriptor.DEFAULT_MEMSTORE_FLUSH_SIZE);
    }
    /**
     * Slow down the writes if the memstore size more than
     * (hbase.hregion.memstore.block.multiplier - 1) times hbase.hregion.memstore.flush.size
     * bytes. This avoids flush storm to hdfs for cases like index building where reads and
     * write happen to all the table regions in the server.
     */
    final long blockingMemStoreSize = flushSize * (
            conf.getLong(HConstants.HREGION_MEMSTORE_BLOCK_MULTIPLIER,
                    HConstants.DEFAULT_HREGION_MEMSTORE_BLOCK_MULTIPLIER) - 1);
    boolean buildLocalIndex = indexMaintainers != null && dataColumns == null && !localIndexScan;
    if (isDescRowKeyOrderUpgrade || isDelete || isUpsert
            || (deleteCQ != null && deleteCF != null) || emptyCF != null || buildLocalIndex) {
        needToWrite = true;
        // TODO: size better
        mutations = Lists.newArrayListWithExpectedSize(1024);
        batchSize = env.getConfiguration().getInt(MUTATE_BATCH_SIZE_ATTRIB,
                QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE);
    }
    Aggregators aggregators = ServerAggregators.deserialize(
            scan.getAttribute(BaseScannerRegionObserver.AGGREGATORS), env.getConfiguration());
    Aggregator[] rowAggregators = aggregators.getAggregators();
    boolean hasMore;
    boolean hasAny = false;
    MultiKeyValueTuple result = new MultiKeyValueTuple();
    if (logger.isDebugEnabled()) {
        logger.debug(LogUtil.addCustomAnnotations("Starting ungrouped coprocessor scan " + scan
                + " " + region.getRegionInfo(), ScanUtil.getCustomAnnotations(scan)));
    }
    long rowCount = 0;
    final RegionScanner innerScanner = theScanner;
    boolean acquiredLock = false;
    try {
        if (needToWrite) {
            synchronized (lock) {
                scansReferenceCount++;
            }
        }
        region.startRegionOperation();
        acquiredLock = true;
        synchronized (innerScanner) {
            do {
                List<Cell> results = new ArrayList<Cell>();
                // Results are potentially returned even when the return value of s.next is false
                // since this is an indication of whether or not there are more values after the
                // ones returned
                hasMore = innerScanner.nextRaw(results);
                if (!results.isEmpty()) {
                    rowCount++;
                    result.setKeyValues(results);
                    try {
                        if (isDescRowKeyOrderUpgrade) {
                            Arrays.fill(values, null);
                            Cell firstKV = results.get(0);
                            RowKeySchema schema = projectedTable.getRowKeySchema();
                            int maxOffset = schema.iterator(firstKV.getRowArray(),
                                    firstKV.getRowOffset() + offset, firstKV.getRowLength(), ptr);
                            for (int i = 0; i < schema.getFieldCount(); i++) {
                                Boolean hasValue = schema.next(ptr, i, maxOffset);
                                if (hasValue == null) {
                                    break;
                                }
                                Field field = schema.getField(i);
                                if (field.getSortOrder() == SortOrder.DESC) {
                                    // Special case for re-writing DESC ARRAY, as the actual byte value needs to change in this case
                                    if (field.getDataType().isArrayType()) {
                                        field.getDataType().coerceBytes(ptr, null, field.getDataType(),
                                                field.getMaxLength(), field.getScale(), field.getSortOrder(),
                                                field.getMaxLength(), field.getScale(), field.getSortOrder(),
                                                true); // force to use correct separator byte
                                    }
                                    // Special case for re-writing DESC CHAR or DESC BINARY, to force the re-writing of trailing space characters
                                    else if (field.getDataType() == PChar.INSTANCE || field.getDataType() == PBinary.INSTANCE) {
                                        int len = ptr.getLength();
                                        while (len > 0 && ptr.get()[ptr.getOffset() + len - 1] == StringUtil.SPACE_UTF8) {
                                            len--;
                                        }
                                        ptr.set(ptr.get(), ptr.getOffset(), len);
                                        // Special case for re-writing DESC FLOAT and DOUBLE, as they're not inverted like they should be (PHOENIX-2171)
                                    } else if (field.getDataType() == PFloat.INSTANCE || field.getDataType() == PDouble.INSTANCE) {
                                        byte[] invertedBytes = SortOrder.invert(ptr.get(), ptr.getOffset(), ptr.getLength());
                                        ptr.set(invertedBytes);
                                    }
                                } else if (field.getDataType() == PBinary.INSTANCE) {
                                    // Remove trailing space characters so that the setValues call below will replace them
                                    // with the correct zero byte character. Note this is somewhat dangerous as these
                                    // could be legit, but I don't know what the alternative is.
                                    int len = ptr.getLength();
                                    while (len > 0 && ptr.get()[ptr.getOffset() + len - 1] == StringUtil.SPACE_UTF8) {
                                        len--;
                                    }
                                    ptr.set(ptr.get(), ptr.getOffset(), len);
                                }
                                values[i] = ptr.copyBytes();
                            }
                            writeToTable.newKey(ptr, values);
                            if (Bytes.compareTo(
                                    firstKV.getRowArray(), firstKV.getRowOffset() + offset, firstKV.getRowLength(),
                                    ptr.get(), ptr.getOffset() + offset, ptr.getLength()) == 0) {
                                continue;
                            }
                            byte[] newRow = ByteUtil.copyKeyBytesIfNecessary(ptr);
                            if (offset > 0) { // for local indexes (prepend region start key)
                                byte[] newRowWithOffset = new byte[offset + newRow.length];
                                System.arraycopy(firstKV.getRowArray(), firstKV.getRowOffset(), newRowWithOffset, 0, offset);;
                                System.arraycopy(newRow, 0, newRowWithOffset, offset, newRow.length);
                                newRow = newRowWithOffset;
                            }
                            byte[] oldRow = Bytes.copy(firstKV.getRowArray(), firstKV.getRowOffset(), firstKV.getRowLength());
                            for (Cell cell : results) {
                                // Copy existing cell but with new row key
                                Cell newCell = new KeyValue(newRow, 0, newRow.length,
                                        cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength(),
                                        cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength(),
                                        cell.getTimestamp(), KeyValue.Type.codeToType(cell.getTypeByte()),
                                        cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
                                switch (KeyValue.Type.codeToType(cell.getTypeByte())) {
                                case Put:
                                    // If Put, point delete old Put
                                    Delete del = new Delete(oldRow);
                                    del.addDeleteMarker(new KeyValue(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(),
                                            cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength(),
                                            cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength(),
                                            cell.getTimestamp(), KeyValue.Type.Delete,
                                            ByteUtil.EMPTY_BYTE_ARRAY, 0, 0));
                                    mutations.add(del);
                                    Put put = new Put(newRow);
                                    put.add(newCell);
                                    mutations.add(put);
                                    break;
                                case Delete:
                                case DeleteColumn:
                                case DeleteFamily:
                                case DeleteFamilyVersion:
                                    Delete delete = new Delete(newRow);
                                    delete.addDeleteMarker(newCell);
                                    mutations.add(delete);
                                    break;
                                }
                            }
                        } else if (buildLocalIndex) {
                            for (IndexMaintainer maintainer : indexMaintainers) {
                                if (!results.isEmpty()) {
                                    result.getKey(ptr);
                                    ValueGetter valueGetter = maintainer.createGetterFromKeyValues(
                                            ImmutableBytesPtr.copyBytesIfNecessary(ptr), results);
                                    Put put = maintainer.buildUpdateMutation(kvBuilder, valueGetter, ptr,
                                            results.get(0).getTimestamp(),
                                            env.getRegion().getRegionInfo().getStartKey(),
                                            env.getRegion().getRegionInfo().getEndKey());
                                    indexMutations.add(put);
                                }
                            }
                            result.setKeyValues(results);
                        } else if (isDelete) {
                            // FIXME: the version of the Delete constructor without the lock
                            // args was introduced in 0.94.4, thus if we try to use it here
                            // we can no longer use the 0.94.2 version of the client.
                            Cell firstKV = results.get(0);
                            Delete delete = new Delete(firstKV.getRowArray(),
                                    firstKV.getRowOffset(), firstKV.getRowLength(), ts);
                            mutations.add(delete);
                            // force tephra to ignore this deletes
                            delete.setAttribute(TxConstants.TX_ROLLBACK_ATTRIBUTE_KEY, new byte[0]);
                        } else if (isUpsert) {
                            Arrays.fill(values, null);
                            int i = 0;
                            List<PColumn> projectedColumns = projectedTable.getColumns();
                            for (; i < projectedTable.getPKColumns().size(); i++) {
                                Expression expression = selectExpressions.get(i);
                                if (expression.evaluate(result, ptr)) {
                                    values[i] = ptr.copyBytes();
                                    // If SortOrder from expression in SELECT doesn't match the
                                    // column being projected into then invert the bits.
                                    if (expression.getSortOrder() != projectedColumns.get(i).getSortOrder()) {
                                        SortOrder.invert(values[i], 0, values[i], 0, values[i].length);
                                    }
                                }
                            }
                            projectedTable.newKey(ptr, values);
                            PRow row = projectedTable.newRow(kvBuilder, ts, ptr);
                            for (; i < projectedColumns.size(); i++) {
                                Expression expression = selectExpressions.get(i);
                                if (expression.evaluate(result, ptr)) {
                                    PColumn column = projectedColumns.get(i);
                                    Object value = expression.getDataType().toObject(ptr, column.getSortOrder());
                                    // We are guaranteed that the two column will have the
                                    // same type.
                                    if (!column.getDataType().isSizeCompatible(ptr, value, column.getDataType(),
                                            expression.getMaxLength(), expression.getScale(),
                                            column.getMaxLength(), column.getScale())) {
                                        throw new DataExceedsCapacityException(
                                                column.getDataType(), column.getMaxLength(), column.getScale());
                                    }
                                    column.getDataType().coerceBytes(ptr, value, expression.getDataType(),
                                            expression.getMaxLength(), expression.getScale(), expression.getSortOrder(),
                                            column.getMaxLength(), column.getScale(), column.getSortOrder(),
                                            projectedTable.rowKeyOrderOptimizable());
                                    byte[] bytes = ByteUtil.copyKeyBytesIfNecessary(ptr);
                                    row.setValue(column, bytes);
                                }
                            }
                            for (Mutation mutation : row.toRowMutations()) {
                                mutations.add(mutation);
                            }
                            for (i = 0; i < selectExpressions.size(); i++) {
                                selectExpressions.get(i).reset();
                            }
                        } else if (deleteCF != null && deleteCQ != null) {
                            // No need to search for delete column, since we project only it
                            // if no empty key value is being set
                            if (emptyCF == null || result.getValue(deleteCF, deleteCQ) != null) {
                                Delete delete = new Delete(results.get(0).getRowArray(),
                                        results.get(0).getRowOffset(), results.get(0).getRowLength());
                                delete.deleteColumns(deleteCF, deleteCQ, ts);
                                // force tephra to ignore this deletes
                                delete.setAttribute(TxConstants.TX_ROLLBACK_ATTRIBUTE_KEY, new byte[0]);
                                mutations.add(delete);
                            }
                        }
                        if (emptyCF != null) {
                            /*
                             * If we've specified an emptyCF, then we need to insert an empty
                             * key value "retroactively" for any key value that is visible at
                             * the timestamp that the DDL was issued. Key values that are not
                             * visible at this timestamp will not ever be projected up to
                             * scans past this timestamp, so don't need to be considered.
                             * We insert one empty key value per row per timestamp.
                             */
                            Set<Long> timeStamps = Sets.newHashSetWithExpectedSize(results.size());
                            for (Cell kv : results) {
                                long kvts = kv.getTimestamp();
                                if (!timeStamps.contains(kvts)) {
                                    Put put = new Put(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength());
                                    put.add(emptyCF, QueryConstants.EMPTY_COLUMN_BYTES, kvts, ByteUtil.EMPTY_BYTE_ARRAY);
                                    mutations.add(put);
                                }
                            }
                        }
                        // Commit in batches based on UPSERT_BATCH_SIZE_ATTRIB in config
                        if (!mutations.isEmpty() && batchSize > 0 && mutations.size() % batchSize == 0) {
                            commitBatch(region, mutations, indexUUID, blockingMemStoreSize);
                            mutations.clear();
                        }
                        // Commit in batches based on UPSERT_BATCH_SIZE_ATTRIB in config
                        if (!indexMutations.isEmpty() && batchSize > 0 && indexMutations.size() % batchSize == 0) {
                            commitBatch(region, indexMutations, null, blockingMemStoreSize);
                            indexMutations.clear();
                        }
                    } catch (ConstraintViolationException e) {
                        // Log and ignore in count
                        logger.error(LogUtil.addCustomAnnotations("Failed to create row in "
                                + region.getRegionInfo().getRegionNameAsString() + " with values "
                                + SchemaUtil.toString(values), ScanUtil.getCustomAnnotations(scan)), e);
                        continue;
                    }
                    aggregators.aggregate(rowAggregators, result);
                    hasAny = true;
                }
            } while (hasMore);
            if (!mutations.isEmpty()) {
                commitBatch(region, mutations, indexUUID, blockingMemStoreSize);
            }
            if (!indexMutations.isEmpty()) {
                commitBatch(region, indexMutations, null, blockingMemStoreSize);
                indexMutations.clear();
            }
        }
    } finally {
        if (needToWrite) {
            synchronized (lock) {
                scansReferenceCount--;
            }
        }
        try {
            innerScanner.close();
        } finally {
            if (acquiredLock) region.closeRegionOperation();
        }
    }
    if (logger.isDebugEnabled()) {
        logger.debug(LogUtil.addCustomAnnotations("Finished scanning " + rowCount
                + " rows for ungrouped coprocessor scan " + scan, ScanUtil.getCustomAnnotations(scan)));
    }
    final boolean hadAny = hasAny;
    KeyValue keyValue = null;
    if (hadAny) {
        byte[] value = aggregators.toBytes(rowAggregators);
        keyValue = KeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY,
                SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length);
    }
    final KeyValue aggKeyValue = keyValue;
    RegionScanner scanner = new BaseRegionScanner(innerScanner) {
        private boolean done = !hadAny;

        @Override
        public boolean isFilterDone() {
            return done;
        }

        @Override
        public boolean next(List<Cell> results) throws IOException {
            if (done) return false;
            done = true;
            results.add(aggKeyValue);
            return false;
        }

        @Override
        public long getMaxResultSize() {
            return scan.getMaxResultSize();
        }
    };
    return scanner;
}
#vulnerable code
@Override
protected RegionScanner doPostScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
        final Scan scan, final RegionScanner s) throws IOException {
    RegionCoprocessorEnvironment env = c.getEnvironment();
    Region region = env.getRegion();
    long ts = scan.getTimeRange().getMax();
    boolean localIndexScan = ScanUtil.isLocalIndex(scan);
    if (ScanUtil.isAnalyzeTable(scan)) {
        byte[] gp_width_bytes = scan.getAttribute(BaseScannerRegionObserver.GUIDEPOST_WIDTH_BYTES);
        byte[] gp_per_region_bytes = scan.getAttribute(BaseScannerRegionObserver.GUIDEPOST_PER_REGION);
        // Let this throw, as this scan is being done for the sole purpose of collecting stats
        StatisticsCollector statsCollector = StatisticsCollectorFactory.createStatisticsCollector(
                env, region.getRegionInfo().getTable().getNameAsString(), ts,
                gp_width_bytes, gp_per_region_bytes);
        return collectStats(s, statsCollector, region, scan, env.getConfiguration());
    }
    int offsetToBe = 0;
    if (localIndexScan) {
        /*
         * For local indexes, we need to set an offset on row key expressions to skip
         * the region start key.
         */
        offsetToBe = region.getRegionInfo().getStartKey().length != 0
                ? region.getRegionInfo().getStartKey().length
                : region.getRegionInfo().getEndKey().length;
        ScanUtil.setRowKeyOffset(scan, offsetToBe);
    }
    final int offset = offsetToBe;
    PTable projectedTable = null;
    PTable writeToTable = null;
    byte[][] values = null;
    byte[] descRowKeyTableBytes = scan.getAttribute(UPGRADE_DESC_ROW_KEY);
    boolean isDescRowKeyOrderUpgrade = descRowKeyTableBytes != null;
    if (isDescRowKeyOrderUpgrade) {
        logger.debug("Upgrading row key for " + region.getRegionInfo().getTable().getNameAsString());
        projectedTable = deserializeTable(descRowKeyTableBytes);
        try {
            writeToTable = PTableImpl.makePTable(projectedTable, true);
        } catch (SQLException e) {
            ServerUtil.throwIOException("Upgrade failed", e); // Impossible
        }
        values = new byte[projectedTable.getPKColumns().size()][];
    }
    byte[] localIndexBytes = scan.getAttribute(LOCAL_INDEX_BUILD);
    List<IndexMaintainer> indexMaintainers = localIndexBytes == null ? null
            : IndexMaintainer.deserialize(localIndexBytes);
    List<Mutation> indexMutations = localIndexBytes == null
            ? Collections.<Mutation>emptyList()
            : Lists.<Mutation>newArrayListWithExpectedSize(1024);
    RegionScanner theScanner = s;
    byte[] indexUUID = scan.getAttribute(PhoenixIndexCodec.INDEX_UUID);
    List<Expression> selectExpressions = null;
    byte[] upsertSelectTable = scan.getAttribute(BaseScannerRegionObserver.UPSERT_SELECT_TABLE);
    boolean isUpsert = false;
    boolean isDelete = false;
    byte[] deleteCQ = null;
    byte[] deleteCF = null;
    byte[] emptyCF = null;
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    if (upsertSelectTable != null) {
        isUpsert = true;
        projectedTable = deserializeTable(upsertSelectTable);
        selectExpressions = deserializeExpressions(scan.getAttribute(BaseScannerRegionObserver.UPSERT_SELECT_EXPRS));
        values = new byte[projectedTable.getPKColumns().size()][];
    } else {
        byte[] isDeleteAgg = scan.getAttribute(BaseScannerRegionObserver.DELETE_AGG);
        isDelete = isDeleteAgg != null && Bytes.compareTo(PDataType.TRUE_BYTES, isDeleteAgg) == 0;
        if (!isDelete) {
            deleteCF = scan.getAttribute(BaseScannerRegionObserver.DELETE_CF);
            deleteCQ = scan.getAttribute(BaseScannerRegionObserver.DELETE_CQ);
        }
        emptyCF = scan.getAttribute(BaseScannerRegionObserver.EMPTY_CF);
    }
    TupleProjector tupleProjector = null;
    byte[][] viewConstants = null;
    ColumnReference[] dataColumns = IndexUtil.deserializeDataTableColumnsToJoin(scan);
    final TupleProjector p = TupleProjector.deserializeProjectorFromScan(scan);
    final HashJoinInfo j = HashJoinInfo.deserializeHashJoinFromScan(scan);
    if ((localIndexScan && !isDelete && !isDescRowKeyOrderUpgrade) || (j == null && p != null)) {
        if (dataColumns != null) {
            tupleProjector = IndexUtil.getTupleProjector(scan, dataColumns);
            viewConstants = IndexUtil.deserializeViewConstantsFromScan(scan);
        }
        ImmutableBytesWritable tempPtr = new ImmutableBytesWritable();
        theScanner = getWrappedScanner(c, theScanner, offset, scan, dataColumns, tupleProjector,
                region, indexMaintainers == null ? null : indexMaintainers.get(0), viewConstants, p, tempPtr);
    }
    if (j != null) {
        theScanner = new HashJoinRegionScanner(theScanner, p, j, ScanUtil.getTenantId(scan), env);
    }
    int batchSize = 0;
    List<Mutation> mutations = Collections.emptyList();
    boolean needToWrite = false;
    Configuration conf = c.getEnvironment().getConfiguration();
    long flushSize = region.getTableDesc().getMemStoreFlushSize();
    if (flushSize <= 0) {
        flushSize = conf.getLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE,
                HTableDescriptor.DEFAULT_MEMSTORE_FLUSH_SIZE);
    }
    /**
     * Upper bound of memstore size allowed for region. Updates will be blocked until the flush
     * happen if the memstore reaches this threshold.
     */
    final long blockingMemStoreSize = flushSize * (
            conf.getLong(HConstants.HREGION_MEMSTORE_BLOCK_MULTIPLIER,
                    HConstants.DEFAULT_HREGION_MEMSTORE_BLOCK_MULTIPLIER) - 1);
    boolean buildLocalIndex = indexMaintainers != null && dataColumns == null && !localIndexScan;
    if (isDescRowKeyOrderUpgrade || isDelete || isUpsert
            || (deleteCQ != null && deleteCF != null) || emptyCF != null || buildLocalIndex) {
        needToWrite = true;
        // TODO: size better
        mutations = Lists.newArrayListWithExpectedSize(1024);
        batchSize = env.getConfiguration().getInt(MUTATE_BATCH_SIZE_ATTRIB,
                QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE);
    }
    Aggregators aggregators = ServerAggregators.deserialize(
            scan.getAttribute(BaseScannerRegionObserver.AGGREGATORS), env.getConfiguration());
    Aggregator[] rowAggregators = aggregators.getAggregators();
    boolean hasMore;
    boolean hasAny = false;
    MultiKeyValueTuple result = new MultiKeyValueTuple();
    if (logger.isDebugEnabled()) {
        logger.debug(LogUtil.addCustomAnnotations("Starting ungrouped coprocessor scan " + scan
                + " " + region.getRegionInfo(), ScanUtil.getCustomAnnotations(scan)));
    }
    long rowCount = 0;
    final RegionScanner innerScanner = theScanner;
    boolean acquiredLock = false;
    try {
        if (needToWrite) {
            synchronized (lock) {
                scansReferenceCount++;
            }
        }
        region.startRegionOperation();
        acquiredLock = true;
        synchronized (innerScanner) {
            do {
                List<Cell> results = new ArrayList<Cell>();
                // Results are potentially returned even when the return value of s.next is false
                // since this is an indication of whether or not there are more values after the
                // ones returned
                hasMore = innerScanner.nextRaw(results);
                if (!results.isEmpty()) {
                    rowCount++;
                    result.setKeyValues(results);
                    try {
                        if (isDescRowKeyOrderUpgrade) {
                            Arrays.fill(values, null);
                            Cell firstKV = results.get(0);
                            RowKeySchema schema = projectedTable.getRowKeySchema();
                            int maxOffset = schema.iterator(firstKV.getRowArray(),
                                    firstKV.getRowOffset() + offset, firstKV.getRowLength(), ptr);
                            for (int i = 0; i < schema.getFieldCount(); i++) {
                                Boolean hasValue = schema.next(ptr, i, maxOffset);
                                if (hasValue == null) {
                                    break;
                                }
                                Field field = schema.getField(i);
                                if (field.getSortOrder() == SortOrder.DESC) {
                                    // Special case for re-writing DESC ARRAY, as the actual byte value needs to change in this case
                                    if (field.getDataType().isArrayType()) {
                                        field.getDataType().coerceBytes(ptr, null, field.getDataType(),
                                                field.getMaxLength(), field.getScale(), field.getSortOrder(),
                                                field.getMaxLength(), field.getScale(), field.getSortOrder(),
                                                true); // force to use correct separator byte
                                    }
                                    // Special case for re-writing DESC CHAR or DESC BINARY, to force the re-writing of trailing space characters
                                    else if (field.getDataType() == PChar.INSTANCE || field.getDataType() == PBinary.INSTANCE) {
                                        int len = ptr.getLength();
                                        while (len > 0 && ptr.get()[ptr.getOffset() + len - 1] == StringUtil.SPACE_UTF8) {
                                            len--;
                                        }
                                        ptr.set(ptr.get(), ptr.getOffset(), len);
                                        // Special case for re-writing DESC FLOAT and DOUBLE, as they're not inverted like they should be (PHOENIX-2171)
                                    } else if (field.getDataType() == PFloat.INSTANCE || field.getDataType() == PDouble.INSTANCE) {
                                        byte[] invertedBytes = SortOrder.invert(ptr.get(), ptr.getOffset(), ptr.getLength());
                                        ptr.set(invertedBytes);
                                    }
                                } else if (field.getDataType() == PBinary.INSTANCE) {
                                    // Remove trailing space characters so that the setValues call below will replace them
                                    // with the correct zero byte character. Note this is somewhat dangerous as these
                                    // could be legit, but I don't know what the alternative is.
                                    int len = ptr.getLength();
                                    while (len > 0 && ptr.get()[ptr.getOffset() + len - 1] == StringUtil.SPACE_UTF8) {
                                        len--;
                                    }
                                    ptr.set(ptr.get(), ptr.getOffset(), len);
                                }
                                values[i] = ptr.copyBytes();
                            }
                            writeToTable.newKey(ptr, values);
                            if (Bytes.compareTo(
                                    firstKV.getRowArray(), firstKV.getRowOffset() + offset, firstKV.getRowLength(),
                                    ptr.get(), ptr.getOffset() + offset, ptr.getLength()) == 0) {
                                continue;
                            }
                            byte[] newRow = ByteUtil.copyKeyBytesIfNecessary(ptr);
                            if (offset > 0) { // for local indexes (prepend region start key)
                                byte[] newRowWithOffset = new byte[offset + newRow.length];
                                System.arraycopy(firstKV.getRowArray(), firstKV.getRowOffset(), newRowWithOffset, 0, offset);;
                                System.arraycopy(newRow, 0, newRowWithOffset, offset, newRow.length);
                                newRow = newRowWithOffset;
                            }
                            byte[] oldRow = Bytes.copy(firstKV.getRowArray(), firstKV.getRowOffset(), firstKV.getRowLength());
                            for (Cell cell : results) {
                                // Copy existing cell but with new row key
                                Cell newCell = new KeyValue(newRow, 0, newRow.length,
                                        cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength(),
                                        cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength(),
                                        cell.getTimestamp(), KeyValue.Type.codeToType(cell.getTypeByte()),
                                        cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
                                switch (KeyValue.Type.codeToType(cell.getTypeByte())) {
                                case Put:
                                    // If Put, point delete old Put
                                    Delete del = new Delete(oldRow);
                                    del.addDeleteMarker(new KeyValue(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(),
                                            cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength(),
                                            cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength(),
                                            cell.getTimestamp(), KeyValue.Type.Delete,
                                            ByteUtil.EMPTY_BYTE_ARRAY, 0, 0));
                                    mutations.add(del);
                                    Put put = new Put(newRow);
                                    put.add(newCell);
                                    mutations.add(put);
                                    break;
                                case Delete:
                                case DeleteColumn:
                                case DeleteFamily:
                                case DeleteFamilyVersion:
                                    Delete delete = new Delete(newRow);
                                    delete.addDeleteMarker(newCell);
                                    mutations.add(delete);
                                    break;
                                }
                            }
                        } else if (buildLocalIndex) {
                            for (IndexMaintainer maintainer : indexMaintainers) {
                                if (!results.isEmpty()) {
                                    result.getKey(ptr);
                                    ValueGetter valueGetter = maintainer.createGetterFromKeyValues(
                                            ImmutableBytesPtr.copyBytesIfNecessary(ptr), results);
                                    Put put = maintainer.buildUpdateMutation(kvBuilder, valueGetter, ptr,
                                            results.get(0).getTimestamp(),
                                            env.getRegion().getRegionInfo().getStartKey(),
                                            env.getRegion().getRegionInfo().getEndKey());
                                    indexMutations.add(put);
                                }
                            }
                            result.setKeyValues(results);
                        } else if (isDelete) {
                            // FIXME: the version of the Delete constructor without the lock
                            // args was introduced in 0.94.4, thus if we try to use it here
                            // we can no longer use the 0.94.2 version of the client.
                            Cell firstKV = results.get(0);
                            Delete delete = new Delete(firstKV.getRowArray(),
                                    firstKV.getRowOffset(), firstKV.getRowLength(), ts);
                            mutations.add(delete);
                            // force tephra to ignore this deletes
                            delete.setAttribute(TxConstants.TX_ROLLBACK_ATTRIBUTE_KEY, new byte[0]);
                        } else if (isUpsert) {
                            Arrays.fill(values, null);
                            int i = 0;
                            List<PColumn> projectedColumns = projectedTable.getColumns();
                            for (; i < projectedTable.getPKColumns().size(); i++) {
                                Expression expression = selectExpressions.get(i);
                                if (expression.evaluate(result, ptr)) {
                                    values[i] = ptr.copyBytes();
                                    // If SortOrder from expression in SELECT doesn't match the
                                    // column being projected into then invert the bits.
                                    if (expression.getSortOrder() != projectedColumns.get(i).getSortOrder()) {
                                        SortOrder.invert(values[i], 0, values[i], 0, values[i].length);
                                    }
                                }
                            }
                            projectedTable.newKey(ptr, values);
                            PRow row = projectedTable.newRow(kvBuilder, ts, ptr);
                            for (; i < projectedColumns.size(); i++) {
                                Expression expression = selectExpressions.get(i);
                                if (expression.evaluate(result, ptr)) {
                                    PColumn column = projectedColumns.get(i);
                                    Object value = expression.getDataType().toObject(ptr, column.getSortOrder());
                                    // We are guaranteed that the two column will have the
                                    // same type.
                                    if (!column.getDataType().isSizeCompatible(ptr, value, column.getDataType(),
                                            expression.getMaxLength(), expression.getScale(),
                                            column.getMaxLength(), column.getScale())) {
                                        throw new DataExceedsCapacityException(
                                                column.getDataType(), column.getMaxLength(), column.getScale());
                                    }
                                    column.getDataType().coerceBytes(ptr, value, expression.getDataType(),
                                            expression.getMaxLength(), expression.getScale(), expression.getSortOrder(),
                                            column.getMaxLength(), column.getScale(), column.getSortOrder(),
                                            projectedTable.rowKeyOrderOptimizable());
                                    byte[] bytes = ByteUtil.copyKeyBytesIfNecessary(ptr);
                                    row.setValue(column, bytes);
                                }
                            }
                            for (Mutation mutation : row.toRowMutations()) {
                                mutations.add(mutation);
                            }
                            for (i = 0; i < selectExpressions.size(); i++) {
                                selectExpressions.get(i).reset();
                            }
                        } else if (deleteCF != null && deleteCQ != null) {
                            // No need to search for delete column, since we project only it
                            // if no empty key value is being set
                            if (emptyCF == null || result.getValue(deleteCF, deleteCQ) != null) {
                                Delete delete = new Delete(results.get(0).getRowArray(),
                                        results.get(0).getRowOffset(), results.get(0).getRowLength());
                                delete.deleteColumns(deleteCF, deleteCQ, ts);
                                // force tephra to ignore this deletes
                                delete.setAttribute(TxConstants.TX_ROLLBACK_ATTRIBUTE_KEY, new byte[0]);
                                mutations.add(delete);
                            }
                        }
                        if (emptyCF != null) {
                            /*
                             * If we've specified an emptyCF, then we need to insert an empty
                             * key value "retroactively" for any key value that is visible at
                             * the timestamp that the DDL was issued. Key values that are not
                             * visible at this timestamp will not ever be projected up to
                             * scans past this timestamp, so don't need to be considered.
                             * We insert one empty key value per row per timestamp.
                             */
                            Set<Long> timeStamps = Sets.newHashSetWithExpectedSize(results.size());
                            for (Cell kv : results) {
                                long kvts = kv.getTimestamp();
                                if (!timeStamps.contains(kvts)) {
                                    Put put = new Put(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength());
                                    put.add(emptyCF, QueryConstants.EMPTY_COLUMN_BYTES, kvts, ByteUtil.EMPTY_BYTE_ARRAY);
                                    mutations.add(put);
                                }
                            }
                        }
                        // Commit in batches based on UPSERT_BATCH_SIZE_ATTRIB in config
                        if (!mutations.isEmpty() && batchSize > 0 && mutations.size() % batchSize == 0) {
                            commitBatch(region, mutations, indexUUID, blockingMemStoreSize);
                            mutations.clear();
                        }
                        // Commit in batches based on UPSERT_BATCH_SIZE_ATTRIB in config
                        if (!indexMutations.isEmpty() && batchSize > 0 && indexMutations.size() % batchSize == 0) {
                            commitBatch(region, indexMutations, null, blockingMemStoreSize);
                            indexMutations.clear();
                        }
                    } catch (ConstraintViolationException e) {
                        // Log and ignore in count
                        logger.error(LogUtil.addCustomAnnotations("Failed to create row in "
                                + region.getRegionInfo().getRegionNameAsString() + " with values "
                                + SchemaUtil.toString(values), ScanUtil.getCustomAnnotations(scan)), e);
                        continue;
                    }
                    aggregators.aggregate(rowAggregators, result);
                    hasAny = true;
                }
            } while (hasMore);
            if (!mutations.isEmpty()) {
                commitBatch(region, mutations, indexUUID, blockingMemStoreSize);
            }
            if (!indexMutations.isEmpty()) {
                commitBatch(region, indexMutations, null, blockingMemStoreSize);
                indexMutations.clear();
            }
        }
    } finally {
        if (needToWrite) {
            synchronized (lock) {
                scansReferenceCount--;
            }
        }
        try {
            innerScanner.close();
        } finally {
            if (acquiredLock) region.closeRegionOperation();
        }
    }
    if (logger.isDebugEnabled()) {
        logger.debug(LogUtil.addCustomAnnotations("Finished scanning " + rowCount
                + " rows for ungrouped coprocessor scan " + scan, ScanUtil.getCustomAnnotations(scan)));
    }
    final boolean hadAny = hasAny;
    KeyValue keyValue = null;
    if (hadAny) {
        byte[] value = aggregators.toBytes(rowAggregators);
        keyValue = KeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY,
                SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length);
    }
    final KeyValue aggKeyValue = keyValue;
    RegionScanner scanner = new BaseRegionScanner(innerScanner) {
        private boolean done = !hadAny;

        @Override
        public boolean isFilterDone() {
            return done;
        }

        @Override
        public boolean next(List<Cell> results) throws IOException {
            if (done) return false;
            done = true;
            results.add(aggKeyValue);
            return false;
        }

        @Override
        public long getMaxResultSize() {
            return scan.getMaxResultSize();
        }
    };
    return scanner;
}
#location 404
#vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code, please generate the patch based on the following information.
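Both versions bump scansReferenceCount under a shared monitor, and the report suggests the counter is still touched somewhere without that monitor, which is a data race. One conventional way out, sketched with an illustrative field rather than Phoenix's actual one:

import java.util.concurrent.atomic.AtomicLong;

public class RefCountSketch {
    // A plain field incremented under a lock but read elsewhere without it is
    // still a data race; an atomic counter keeps every access safe without a
    // shared monitor. (Sketch of the pattern, not Phoenix's implementation.)
    private static final AtomicLong scansReferenceCount = new AtomicLong();

    static void enter() { scansReferenceCount.incrementAndGet(); }
    static void exit() { scansReferenceCount.decrementAndGet(); }
    static long current() { return scansReferenceCount.get(); } // safe unsynchronized read
}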
#fixed code
@Test
public void testUpsertDeleteWithOldClient() throws Exception {
    // Insert data with old client and read with new client
    executeQueryWithClientVersion(compatibleClientVersion, CREATE_ADD);
    executeQueriesWithCurrentVersion(QUERY);
    assertExpectedOutput(CREATE_ADD, QUERY);

    // Deletes with the old client
    executeQueryWithClientVersion(compatibleClientVersion, ADD_DELETE);
    executeQueryWithClientVersion(compatibleClientVersion, QUERY_ADD_DELETE);
    assertExpectedOutput(ADD_DELETE, QUERY_ADD_DELETE);
}
#vulnerable code
@Test
public void testUpsertDeleteWithOldClient() throws Exception {
    checkForPreConditions();
    // Insert data with old client and read with new client
    executeQueryWithClientVersion(compatibleClientVersion, CREATE_ADD);
    executeQueriesWithCurrentVersion(QUERY);
    assertTrue(compareOutput(CREATE_ADD, QUERY));

    // Deletes with the old client
    executeQueryWithClientVersion(compatibleClientVersion, ADD_DELETE);
    executeQueryWithClientVersion(compatibleClientVersion, QUERY_ADD_DELETE);
    assertTrue(compareOutput(ADD_DELETE, QUERY_ADD_DELETE));
}
#location 3
#vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code, please generate the patch based on the following information.
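The fix drops the shared checkForPreConditions step (the flagged line) and asserts directly on each test's own output. A small, hypothetical sketch of why shared mutable precondition state races when a suite runs tests in parallel, and one thread-safe shape if such a flag is truly needed:

import java.util.concurrent.atomic.AtomicBoolean;

public class TestStateSketch {
    // Racy shape: private static boolean preconditionsChecked = false;
    // Safe shape when the flag genuinely must be shared across test threads:
    private static final AtomicBoolean preconditionsChecked = new AtomicBoolean(false);

    static void ensurePreconditions() {
        if (preconditionsChecked.compareAndSet(false, true)) {
            // runs exactly once, no matter how many threads race here
        }
    }
}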
#fixed code
@Override
public boolean isFilterDone() {
    return false;
}
#vulnerable code
@Override
public boolean isFilterDone() {
    return hasMore;
}
#location 2
#vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code, please generate the patch based on the following information.
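The vulnerable isFilterDone reads hasMore, a field the scan loop mutates, without synchronization; the fix sidesteps the race by returning a constant. If such a flag really must cross threads, volatile gives the needed visibility. An illustrative shape:

public class FilterDoneSketch {
    // volatile guarantees that a write by the scanning thread is visible to
    // any thread calling isFilterDone; a plain field carries no such guarantee.
    private volatile boolean hasMore = true;

    public boolean isFilterDone() { return !hasMore; }
    public void markExhausted() { hasMore = false; }
}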
#fixed code
@SuppressWarnings("unchecked")
protected QueryPlan compileJoinQuery(StatementContext context, List<Object> binds, JoinTable joinTable,
        boolean asSubquery, boolean projectPKColumns, List<OrderByNode> orderBy) throws SQLException {
    byte[] emptyByteArray = new byte[0];
    List<JoinSpec> joinSpecs = joinTable.getJoinSpecs();
    if (joinSpecs.isEmpty()) {
        Table table = joinTable.getTable();
        SelectStatement subquery = table.getAsSubquery(orderBy);
        if (!table.isSubselect()) {
            context.setCurrentTable(table.getTableRef());
            PTable projectedTable = table.createProjectedTable(!projectPKColumns, context);
            TupleProjector projector = new TupleProjector(projectedTable);
            TupleProjector.serializeProjectorIntoScan(context.getScan(), projector);
            context.setResolver(FromCompiler.getResolverForProjectedTable(projectedTable,
                    context.getConnection(), subquery.getUdfParseNodes()));
            table.projectColumns(context.getScan());
            return compileSingleFlatQuery(context, subquery, binds, asSubquery, !asSubquery, null,
                    projectPKColumns ? projector : null, true);
        }
        QueryPlan plan = compileSubquery(subquery, false);
        PTable projectedTable = table.createProjectedTable(plan.getProjector());
        context.setResolver(FromCompiler.getResolverForProjectedTable(projectedTable,
                context.getConnection(), subquery.getUdfParseNodes()));
        return new TupleProjectionPlan(plan, new TupleProjector(plan.getProjector()),
                table.compilePostFilterExpression(context));
    }
    boolean[] starJoinVector;
    if (!this.useSortMergeJoin && (starJoinVector = joinTable.getStarJoinVector()) != null) {
        Table table = joinTable.getTable();
        PTable initialProjectedTable;
        TableRef tableRef;
        SelectStatement query;
        TupleProjector tupleProjector;
        if (!table.isSubselect()) {
            context.setCurrentTable(table.getTableRef());
            initialProjectedTable = table.createProjectedTable(!projectPKColumns, context);
            tableRef = table.getTableRef();
            table.projectColumns(context.getScan());
            query = joinTable.getAsSingleSubquery(table.getAsSubquery(orderBy), asSubquery);
            tupleProjector = new TupleProjector(initialProjectedTable);
        } else {
            SelectStatement subquery = table.getAsSubquery(orderBy);
            QueryPlan plan = compileSubquery(subquery, false);
            initialProjectedTable = table.createProjectedTable(plan.getProjector());
            tableRef = plan.getTableRef();
            context.getScan().setFamilyMap(plan.getContext().getScan().getFamilyMap());
            query = joinTable.getAsSingleSubquery((SelectStatement) plan.getStatement(), asSubquery);
            tupleProjector = new TupleProjector(plan.getProjector());
        }
        context.setCurrentTable(tableRef);
        PTable projectedTable = initialProjectedTable;
        int count = joinSpecs.size();
        ImmutableBytesPtr[] joinIds = new ImmutableBytesPtr[count];
        List<Expression>[] joinExpressions = new List[count];
        JoinType[] joinTypes = new JoinType[count];
        PTable[] tables = new PTable[count];
        int[] fieldPositions = new int[count];
        HashSubPlan[] subPlans = new HashSubPlan[count];
        fieldPositions[0] = projectedTable.getColumns().size() - projectedTable.getPKColumns().size();
        for (int i = 0; i < count; i++) {
            JoinSpec joinSpec = joinSpecs.get(i);
            Scan subScan = ScanUtil.newScan(originalScan);
            StatementContext subContext = new StatementContext(statement, context.getResolver(),
                    subScan, new SequenceManager(statement));
            QueryPlan joinPlan = compileJoinQuery(subContext, binds, joinSpec.getJoinTable(), true, true, null);
            boolean hasPostReference = joinSpec.getJoinTable().hasPostReference();
            if (hasPostReference) {
                tables[i] = subContext.getResolver().getTables().get(0).getTable();
                projectedTable = JoinCompiler.joinProjectedTables(projectedTable, tables[i], joinSpec.getType());
            } else {
                tables[i] = null;
            }
            context.setResolver(FromCompiler.getResolverForProjectedTable(projectedTable,
                    context.getConnection(), query.getUdfParseNodes()));
            joinIds[i] = new ImmutableBytesPtr(emptyByteArray); // place-holder
            Pair<List<Expression>, List<Expression>> joinConditions =
                    joinSpec.compileJoinConditions(context, subContext, true);
            joinExpressions[i] = joinConditions.getFirst();
            List<Expression> hashExpressions = joinConditions.getSecond();
            Pair<Expression, Expression> keyRangeExpressions = new Pair<Expression, Expression>(null, null);
            boolean optimized = getKeyExpressionCombinations(keyRangeExpressions, context,
                    joinTable.getStatement(), tableRef, joinSpec.getType(), joinExpressions[i], hashExpressions);
            Expression keyRangeLhsExpression = keyRangeExpressions.getFirst();
            Expression keyRangeRhsExpression = keyRangeExpressions.getSecond();
            joinTypes[i] = joinSpec.getType();
            if (i < count - 1) {
                fieldPositions[i + 1] = fieldPositions[i]
                        + (tables[i] == null ? 0 : (tables[i].getColumns().size() - tables[i].getPKColumns().size()));
            }
            subPlans[i] = new HashSubPlan(i, joinPlan, optimized ? null : hashExpressions,
                    joinSpec.isSingleValueOnly(), keyRangeLhsExpression, keyRangeRhsExpression);
        }
        TupleProjector.serializeProjectorIntoScan(context.getScan(), tupleProjector);
        QueryPlan plan = compileSingleFlatQuery(context, query, binds, asSubquery,
                !asSubquery && joinTable.isAllLeftJoin(), null,
                !table.isSubselect() && projectPKColumns ? tupleProjector : null, true);
        Expression postJoinFilterExpression = joinTable.compilePostFilterExpression(context, table);
        Integer limit = null;
        if (!query.isAggregate() && !query.isDistinct() && query.getOrderBy().isEmpty()) {
            limit = plan.getLimit();
        }
        HashJoinInfo joinInfo = new HashJoinInfo(projectedTable, joinIds, joinExpressions, joinTypes,
                starJoinVector, tables, fieldPositions, postJoinFilterExpression, limit);
        return HashJoinPlan.create(joinTable.getStatement(), plan, joinInfo, subPlans);
    }
    JoinSpec lastJoinSpec = joinSpecs.get(joinSpecs.size() - 1);
    JoinType type = lastJoinSpec.getType();
    if (!this.useSortMergeJoin && (type == JoinType.Right || type == JoinType.Inner)
            && lastJoinSpec.getJoinTable().getJoinSpecs().isEmpty()
            && lastJoinSpec.getJoinTable().getTable().isFlat()) {
        JoinTable rhsJoinTable = lastJoinSpec.getJoinTable();
        Table rhsTable = rhsJoinTable.getTable();
        JoinTable lhsJoin = joinTable.getSubJoinTableWithoutPostFilters();
        Scan subScan = ScanUtil.newScan(originalScan);
        StatementContext lhsCtx = new StatementContext(statement, context.getResolver(),
                subScan, new SequenceManager(statement));
        QueryPlan lhsPlan = compileJoinQuery(lhsCtx, binds, lhsJoin, true, true, null);
        PTable rhsProjTable;
        TableRef rhsTableRef;
        SelectStatement rhs;
        TupleProjector tupleProjector;
        if (!rhsTable.isSubselect()) {
            context.setCurrentTable(rhsTable.getTableRef());
            rhsProjTable = rhsTable.createProjectedTable(!projectPKColumns, context);
            rhsTableRef = rhsTable.getTableRef();
            rhsTable.projectColumns(context.getScan());
            rhs = rhsJoinTable.getAsSingleSubquery(rhsTable.getAsSubquery(orderBy), asSubquery);
            tupleProjector = new TupleProjector(rhsProjTable);
        } else {
            SelectStatement subquery = rhsTable.getAsSubquery(orderBy);
            QueryPlan plan = compileSubquery(subquery, false);
            rhsProjTable = rhsTable.createProjectedTable(plan.getProjector());
            rhsTableRef = plan.getTableRef();
            context.getScan().setFamilyMap(plan.getContext().getScan().getFamilyMap());
            rhs = rhsJoinTable.getAsSingleSubquery((SelectStatement) plan.getStatement(), asSubquery);
            tupleProjector = new TupleProjector(plan.getProjector());
        }
        context.setCurrentTable(rhsTableRef);
        context.setResolver(FromCompiler.getResolverForProjectedTable(rhsProjTable,
                context.getConnection(), rhs.getUdfParseNodes()));
        ImmutableBytesPtr[] joinIds = new ImmutableBytesPtr[] { new ImmutableBytesPtr(emptyByteArray) };
        Pair<List<Expression>, List<Expression>> joinConditions =
                lastJoinSpec.compileJoinConditions(lhsCtx, context, true);
        List<Expression> joinExpressions = joinConditions.getSecond();
        List<Expression> hashExpressions = joinConditions.getFirst();
        boolean needsMerge = lhsJoin.hasPostReference();
        PTable lhsTable = needsMerge ? lhsCtx.getResolver().getTables().get(0).getTable() : null;
        int fieldPosition = needsMerge
                ? rhsProjTable.getColumns().size() - rhsProjTable.getPKColumns().size() : 0;
        PTable projectedTable = needsMerge
                ? JoinCompiler.joinProjectedTables(rhsProjTable, lhsTable,
                        type == JoinType.Right ? JoinType.Left : type)
                : rhsProjTable;
        TupleProjector.serializeProjectorIntoScan(context.getScan(), tupleProjector);
        context.setResolver(FromCompiler.getResolverForProjectedTable(projectedTable,
                context.getConnection(), rhs.getUdfParseNodes()));
        QueryPlan rhsPlan = compileSingleFlatQuery(context, rhs, binds, asSubquery,
                !asSubquery && type == JoinType.Right, null,
                !rhsTable.isSubselect() && projectPKColumns ? tupleProjector : null, true);
        Expression postJoinFilterExpression = joinTable.compilePostFilterExpression(context, rhsTable);
        Integer limit = null;
        if (!rhs.isAggregate() && !rhs.isDistinct() && rhs.getOrderBy().isEmpty()) {
            limit = rhsPlan.getLimit();
        }
        HashJoinInfo joinInfo = new HashJoinInfo(projectedTable, joinIds,
                new List[] { joinExpressions },
                new JoinType[] { type == JoinType.Right ? JoinType.Left : type },
                new boolean[] { true }, new PTable[] { lhsTable },
                new int[] { fieldPosition }, postJoinFilterExpression, limit);
        Pair<Expression, Expression> keyRangeExpressions = new Pair<Expression, Expression>(null, null);
        getKeyExpressionCombinations(keyRangeExpressions, context, joinTable.getStatement(),
                rhsTableRef, type, joinExpressions, hashExpressions);
        return HashJoinPlan.create(joinTable.getStatement(), rhsPlan, joinInfo,
                new HashSubPlan[] { new HashSubPlan(0, lhsPlan, hashExpressions, false,
                        keyRangeExpressions.getFirst(), keyRangeExpressions.getSecond()) });
    }
    JoinTable lhsJoin = joinTable.getSubJoinTableWithoutPostFilters();
    JoinTable rhsJoin = lastJoinSpec.getJoinTable();
    if (type == JoinType.Right) {
        JoinTable temp = lhsJoin;
        lhsJoin = rhsJoin;
        rhsJoin = temp;
    }
    List<EqualParseNode> joinConditionNodes = lastJoinSpec.getOnConditions();
    List<OrderByNode> lhsOrderBy = Lists.<OrderByNode>newArrayListWithExpectedSize(joinConditionNodes.size());
    List<OrderByNode> rhsOrderBy = Lists.<OrderByNode>newArrayListWithExpectedSize(joinConditionNodes.size());
    for (EqualParseNode condition : joinConditionNodes) {
        lhsOrderBy.add(NODE_FACTORY.orderBy(
                type == JoinType.Right ? condition.getRHS() : condition.getLHS(), false, true));
        rhsOrderBy.add(NODE_FACTORY.orderBy(
                type == JoinType.Right ? condition.getLHS() : condition.getRHS(), false, true));
    }
    Scan lhsScan = ScanUtil.newScan(originalScan);
    StatementContext lhsCtx = new StatementContext(statement, context.getResolver(),
            lhsScan, new SequenceManager(statement));
    boolean preserveRowkey = !projectPKColumns && type != JoinType.Full;
    QueryPlan lhsPlan = compileJoinQuery(lhsCtx, binds, lhsJoin, true, !preserveRowkey, lhsOrderBy);
    PTable lhsProjTable = lhsCtx.getResolver().getTables().get(0).getTable();
    boolean isInRowKeyOrder = preserveRowkey && lhsPlan.getOrderBy().getOrderByExpressions().isEmpty();
    Scan rhsScan = ScanUtil.newScan(originalScan);
    StatementContext rhsCtx = new StatementContext(statement, context.getResolver(),
            rhsScan, new SequenceManager(statement));
    QueryPlan rhsPlan = compileJoinQuery(rhsCtx, binds, rhsJoin, true, true, rhsOrderBy);
    PTable rhsProjTable = rhsCtx.getResolver().getTables().get(0).getTable();
    Pair<List<Expression>, List<Expression>> joinConditions = lastJoinSpec.compileJoinConditions(
            type == JoinType.Right ? rhsCtx : lhsCtx,
            type == JoinType.Right ? lhsCtx : rhsCtx, false);
    List<Expression> lhsKeyExpressions = type == JoinType.Right
            ? joinConditions.getSecond() : joinConditions.getFirst();
    List<Expression> rhsKeyExpressions = type == JoinType.Right
            ? joinConditions.getFirst() : joinConditions.getSecond();
    boolean needsMerge = rhsJoin.hasPostReference();
    int fieldPosition = needsMerge
            ? lhsProjTable.getColumns().size() - lhsProjTable.getPKColumns().size() : 0;
    PTable projectedTable = needsMerge
            ? JoinCompiler.joinProjectedTables(lhsProjTable, rhsProjTable,
                    type == JoinType.Right ? JoinType.Left : type)
            : lhsProjTable;
    ColumnResolver resolver = FromCompiler.getResolverForProjectedTable(projectedTable,
            context.getConnection(), new HashMap<String, UDFParseNode>(1));
    TableRef tableRef = resolver.getTables().get(0);
    StatementContext subCtx = new StatementContext(statement, resolver,
            ScanUtil.newScan(originalScan), new SequenceManager(statement));
    subCtx.setCurrentTable(tableRef);
    QueryPlan innerPlan = new SortMergeJoinPlan(subCtx, joinTable.getStatement(), tableRef,
            type == JoinType.Right ? JoinType.Left : type, lhsPlan, rhsPlan,
            lhsKeyExpressions, rhsKeyExpressions, projectedTable, lhsProjTable,
            needsMerge ? rhsProjTable : null, fieldPosition, lastJoinSpec.isSingleValueOnly());
    context.setCurrentTable(tableRef);
    context.setResolver(resolver);
    TableNode from = NODE_FACTORY.namedTable(tableRef.getTableAlias(),
            NODE_FACTORY.table(tableRef.getTable().getSchemaName().getString(),
                    tableRef.getTable().getTableName().getString()));
    ParseNode where = joinTable.getPostFiltersCombined();
    SelectStatement select = asSubquery
            ? NODE_FACTORY.select(from, joinTable.getStatement().getHint(), false,
                    Collections.<AliasedNode>emptyList(), where, null, null, orderBy, null, 0, false,
                    joinTable.getStatement().hasSequence(),
                    Collections.<SelectStatement>emptyList(),
                    joinTable.getStatement().getUdfParseNodes())
            : NODE_FACTORY.select(joinTable.getStatement(), from, where);
    return compileSingleFlatQuery(context, select, binds, asSubquery, false, innerPlan, null, isInRowKeyOrder);
}
#vulnerable code
@SuppressWarnings("unchecked")
protected QueryPlan compileJoinQuery(StatementContext context, List<Object> binds, JoinTable joinTable, boolean asSubquery, boolean projectPKColumns, List<OrderByNode> orderBy) throws SQLException {
    byte[] emptyByteArray = new byte[0];
    List<JoinSpec> joinSpecs = joinTable.getJoinSpecs();
    if (joinSpecs.isEmpty()) {
        Table table = joinTable.getTable();
        SelectStatement subquery = table.getAsSubquery(orderBy);
        if (!table.isSubselect()) {
            context.setCurrentTable(table.getTableRef());
            PTable projectedTable = table.createProjectedTable(!projectPKColumns, context);
            TupleProjector.serializeProjectorIntoScan(context.getScan(), new TupleProjector(projectedTable));
            context.setResolver(FromCompiler.getResolverForProjectedTable(projectedTable, context.getConnection(), subquery.getUdfParseNodes()));
            table.projectColumns(context.getScan());
            return compileSingleQuery(context, subquery, binds, asSubquery, !asSubquery);
        }
        QueryPlan plan = compileSubquery(subquery, false);
        PTable projectedTable = table.createProjectedTable(plan.getProjector());
        context.setResolver(FromCompiler.getResolverForProjectedTable(projectedTable, context.getConnection(), subquery.getUdfParseNodes()));
        return new TupleProjectionPlan(plan, new TupleProjector(plan.getProjector()), table.compilePostFilterExpression(context));
    }
    boolean[] starJoinVector;
    if (!this.useSortMergeJoin && (starJoinVector = joinTable.getStarJoinVector()) != null) {
        Table table = joinTable.getTable();
        PTable initialProjectedTable;
        TableRef tableRef;
        SelectStatement query;
        TupleProjector tupleProjector;
        if (!table.isSubselect()) {
            context.setCurrentTable(table.getTableRef());
            initialProjectedTable = table.createProjectedTable(!projectPKColumns, context);
            tableRef = table.getTableRef();
            table.projectColumns(context.getScan());
            query = joinTable.getAsSingleSubquery(table.getAsSubquery(orderBy), asSubquery);
            tupleProjector = new TupleProjector(initialProjectedTable);
        } else {
            SelectStatement subquery = table.getAsSubquery(orderBy);
            QueryPlan plan = compileSubquery(subquery, false);
            initialProjectedTable = table.createProjectedTable(plan.getProjector());
            tableRef = plan.getTableRef();
            context.getScan().setFamilyMap(plan.getContext().getScan().getFamilyMap());
            query = joinTable.getAsSingleSubquery((SelectStatement) plan.getStatement(), asSubquery);
            tupleProjector = new TupleProjector(plan.getProjector());
        }
        context.setCurrentTable(tableRef);
        PTable projectedTable = initialProjectedTable;
        int count = joinSpecs.size();
        ImmutableBytesPtr[] joinIds = new ImmutableBytesPtr[count];
        List<Expression>[] joinExpressions = new List[count];
        JoinType[] joinTypes = new JoinType[count];
        PTable[] tables = new PTable[count];
        int[] fieldPositions = new int[count];
        HashSubPlan[] subPlans = new HashSubPlan[count];
        fieldPositions[0] = projectedTable.getColumns().size() - projectedTable.getPKColumns().size();
        for (int i = 0; i < count; i++) {
            JoinSpec joinSpec = joinSpecs.get(i);
            Scan subScan = ScanUtil.newScan(originalScan);
            StatementContext subContext = new StatementContext(statement, context.getResolver(), subScan, new SequenceManager(statement));
            QueryPlan joinPlan = compileJoinQuery(subContext, binds, joinSpec.getJoinTable(), true, true, null);
            boolean hasPostReference = joinSpec.getJoinTable().hasPostReference();
            if (hasPostReference) {
                tables[i] = subContext.getResolver().getTables().get(0).getTable();
                projectedTable = JoinCompiler.joinProjectedTables(projectedTable, tables[i], joinSpec.getType());
            } else {
                tables[i] = null;
            }
            context.setResolver(FromCompiler.getResolverForProjectedTable(projectedTable, context.getConnection(), query.getUdfParseNodes()));
            joinIds[i] = new ImmutableBytesPtr(emptyByteArray); // place-holder
            Pair<List<Expression>, List<Expression>> joinConditions = joinSpec.compileJoinConditions(context, subContext, true);
            joinExpressions[i] = joinConditions.getFirst();
            List<Expression> hashExpressions = joinConditions.getSecond();
            Pair<Expression, Expression> keyRangeExpressions = new Pair<Expression, Expression>(null, null);
            boolean optimized = getKeyExpressionCombinations(keyRangeExpressions, context, joinTable.getStatement(), tableRef, joinSpec.getType(), joinExpressions[i], hashExpressions);
            Expression keyRangeLhsExpression = keyRangeExpressions.getFirst();
            Expression keyRangeRhsExpression = keyRangeExpressions.getSecond();
            joinTypes[i] = joinSpec.getType();
            if (i < count - 1) {
                fieldPositions[i + 1] = fieldPositions[i] + (tables[i] == null ? 0 : (tables[i].getColumns().size() - tables[i].getPKColumns().size()));
            }
            subPlans[i] = new HashSubPlan(i, joinPlan, optimized ? null : hashExpressions, joinSpec.isSingleValueOnly(), keyRangeLhsExpression, keyRangeRhsExpression);
        }
        TupleProjector.serializeProjectorIntoScan(context.getScan(), tupleProjector);
        QueryPlan plan = compileSingleQuery(context, query, binds, asSubquery, !asSubquery && joinTable.isAllLeftJoin());
        Expression postJoinFilterExpression = joinTable.compilePostFilterExpression(context, table);
        Integer limit = null;
        if (!query.isAggregate() && !query.isDistinct() && query.getOrderBy().isEmpty()) {
            limit = plan.getLimit();
        }
        HashJoinInfo joinInfo = new HashJoinInfo(projectedTable, joinIds, joinExpressions, joinTypes, starJoinVector, tables, fieldPositions, postJoinFilterExpression, limit);
        return HashJoinPlan.create(joinTable.getStatement(), plan, joinInfo, subPlans);
    }
    JoinSpec lastJoinSpec = joinSpecs.get(joinSpecs.size() - 1);
    JoinType type = lastJoinSpec.getType();
    if (!this.useSortMergeJoin && (type == JoinType.Right || type == JoinType.Inner) && lastJoinSpec.getJoinTable().getJoinSpecs().isEmpty() && lastJoinSpec.getJoinTable().getTable().isFlat()) {
        JoinTable rhsJoinTable = lastJoinSpec.getJoinTable();
        Table rhsTable = rhsJoinTable.getTable();
        JoinTable lhsJoin = joinTable.getSubJoinTableWithoutPostFilters();
        Scan subScan = ScanUtil.newScan(originalScan);
        StatementContext lhsCtx = new StatementContext(statement, context.getResolver(), subScan, new SequenceManager(statement));
        QueryPlan lhsPlan = compileJoinQuery(lhsCtx, binds, lhsJoin, true, true, null);
        PTable rhsProjTable;
        TableRef rhsTableRef;
        SelectStatement rhs;
        TupleProjector tupleProjector;
        if (!rhsTable.isSubselect()) {
            context.setCurrentTable(rhsTable.getTableRef());
            rhsProjTable = rhsTable.createProjectedTable(!projectPKColumns, context);
            rhsTableRef = rhsTable.getTableRef();
            rhsTable.projectColumns(context.getScan());
            rhs = rhsJoinTable.getAsSingleSubquery(rhsTable.getAsSubquery(orderBy), asSubquery);
            tupleProjector = new TupleProjector(rhsProjTable);
        } else {
            SelectStatement subquery = rhsTable.getAsSubquery(orderBy);
            QueryPlan plan = compileSubquery(subquery, false);
            rhsProjTable = rhsTable.createProjectedTable(plan.getProjector());
            rhsTableRef = plan.getTableRef();
            context.getScan().setFamilyMap(plan.getContext().getScan().getFamilyMap());
            rhs = rhsJoinTable.getAsSingleSubquery((SelectStatement) plan.getStatement(), asSubquery);
            tupleProjector = new TupleProjector(plan.getProjector());
        }
        context.setCurrentTable(rhsTableRef);
        context.setResolver(FromCompiler.getResolverForProjectedTable(rhsProjTable, context.getConnection(), rhs.getUdfParseNodes()));
        ImmutableBytesPtr[] joinIds = new ImmutableBytesPtr[] {new ImmutableBytesPtr(emptyByteArray)};
        Pair<List<Expression>, List<Expression>> joinConditions = lastJoinSpec.compileJoinConditions(lhsCtx, context, true);
        List<Expression> joinExpressions = joinConditions.getSecond();
        List<Expression> hashExpressions = joinConditions.getFirst();
        boolean needsMerge = lhsJoin.hasPostReference();
        PTable lhsTable = needsMerge ? lhsCtx.getResolver().getTables().get(0).getTable() : null;
        int fieldPosition = needsMerge ? rhsProjTable.getColumns().size() - rhsProjTable.getPKColumns().size() : 0;
        PTable projectedTable = needsMerge ? JoinCompiler.joinProjectedTables(rhsProjTable, lhsTable, type == JoinType.Right ? JoinType.Left : type) : rhsProjTable;
        TupleProjector.serializeProjectorIntoScan(context.getScan(), tupleProjector);
        context.setResolver(FromCompiler.getResolverForProjectedTable(projectedTable, context.getConnection(), rhs.getUdfParseNodes()));
        QueryPlan rhsPlan = compileSingleQuery(context, rhs, binds, asSubquery, !asSubquery && type == JoinType.Right);
        Expression postJoinFilterExpression = joinTable.compilePostFilterExpression(context, rhsTable);
        Integer limit = null;
        if (!rhs.isAggregate() && !rhs.isDistinct() && rhs.getOrderBy().isEmpty()) {
            limit = rhsPlan.getLimit();
        }
        HashJoinInfo joinInfo = new HashJoinInfo(projectedTable, joinIds, new List[] {joinExpressions}, new JoinType[] {type == JoinType.Right ? JoinType.Left : type}, new boolean[] {true}, new PTable[] {lhsTable}, new int[] {fieldPosition}, postJoinFilterExpression, limit);
        Pair<Expression, Expression> keyRangeExpressions = new Pair<Expression, Expression>(null, null);
        getKeyExpressionCombinations(keyRangeExpressions, context, joinTable.getStatement(), rhsTableRef, type, joinExpressions, hashExpressions);
        return HashJoinPlan.create(joinTable.getStatement(), rhsPlan, joinInfo, new HashSubPlan[] {new HashSubPlan(0, lhsPlan, hashExpressions, false, keyRangeExpressions.getFirst(), keyRangeExpressions.getSecond())});
    }
    JoinTable lhsJoin = joinTable.getSubJoinTableWithoutPostFilters();
    JoinTable rhsJoin = lastJoinSpec.getJoinTable();
    if (type == JoinType.Right) {
        JoinTable temp = lhsJoin;
        lhsJoin = rhsJoin;
        rhsJoin = temp;
    }
    List<EqualParseNode> joinConditionNodes = lastJoinSpec.getOnConditions();
    List<OrderByNode> lhsOrderBy = Lists.<OrderByNode> newArrayListWithExpectedSize(joinConditionNodes.size());
    List<OrderByNode> rhsOrderBy = Lists.<OrderByNode> newArrayListWithExpectedSize(joinConditionNodes.size());
    for (EqualParseNode condition : joinConditionNodes) {
        lhsOrderBy.add(NODE_FACTORY.orderBy(type == JoinType.Right ? condition.getRHS() : condition.getLHS(), false, true));
        rhsOrderBy.add(NODE_FACTORY.orderBy(type == JoinType.Right ? condition.getLHS() : condition.getRHS(), false, true));
    }
    Scan lhsScan = ScanUtil.newScan(originalScan);
    StatementContext lhsCtx = new StatementContext(statement, context.getResolver(), lhsScan, new SequenceManager(statement));
    boolean preserveRowkey = !projectPKColumns && type != JoinType.Full;
    QueryPlan lhsPlan = compileJoinQuery(lhsCtx, binds, lhsJoin, true, !preserveRowkey, lhsOrderBy);
    PTable lhsProjTable = lhsCtx.getResolver().getTables().get(0).getTable();
    boolean isInRowKeyOrder = preserveRowkey && lhsPlan.getOrderBy().getOrderByExpressions().isEmpty();
    Scan rhsScan = ScanUtil.newScan(originalScan);
    StatementContext rhsCtx = new StatementContext(statement, context.getResolver(), rhsScan, new SequenceManager(statement));
    QueryPlan rhsPlan = compileJoinQuery(rhsCtx, binds, rhsJoin, true, true, rhsOrderBy);
    PTable rhsProjTable = rhsCtx.getResolver().getTables().get(0).getTable();
    Pair<List<Expression>, List<Expression>> joinConditions = lastJoinSpec.compileJoinConditions(type == JoinType.Right ? rhsCtx : lhsCtx, type == JoinType.Right ? lhsCtx : rhsCtx, false);
    List<Expression> lhsKeyExpressions = type == JoinType.Right ? joinConditions.getSecond() : joinConditions.getFirst();
    List<Expression> rhsKeyExpressions = type == JoinType.Right ? joinConditions.getFirst() : joinConditions.getSecond();
    boolean needsMerge = rhsJoin.hasPostReference();
    int fieldPosition = needsMerge ? lhsProjTable.getColumns().size() - lhsProjTable.getPKColumns().size() : 0;
    PTable projectedTable = needsMerge ? JoinCompiler.joinProjectedTables(lhsProjTable, rhsProjTable, type == JoinType.Right ? JoinType.Left : type) : lhsProjTable;
    ColumnResolver resolver = FromCompiler.getResolverForProjectedTable(projectedTable, context.getConnection(), new HashMap<String,UDFParseNode>(1));
    TableRef tableRef = resolver.getTables().get(0);
    StatementContext subCtx = new StatementContext(statement, resolver, ScanUtil.newScan(originalScan), new SequenceManager(statement));
    subCtx.setCurrentTable(tableRef);
    QueryPlan innerPlan = new SortMergeJoinPlan(subCtx, joinTable.getStatement(), tableRef, type == JoinType.Right ? JoinType.Left : type, lhsPlan, rhsPlan, lhsKeyExpressions, rhsKeyExpressions, projectedTable, lhsProjTable, needsMerge ? rhsProjTable : null, fieldPosition, lastJoinSpec.isSingleValueOnly());
    context.setCurrentTable(tableRef);
    context.setResolver(resolver);
    TableNode from = NODE_FACTORY.namedTable(tableRef.getTableAlias(), NODE_FACTORY.table(tableRef.getTable().getSchemaName().getString(), tableRef.getTable().getTableName().getString()));
    ParseNode where = joinTable.getPostFiltersCombined();
    SelectStatement select = asSubquery ? NODE_FACTORY.select(from, joinTable.getStatement().getHint(), false, Collections.<AliasedNode> emptyList(), where, null, null, orderBy, null, 0, false, joinTable.getStatement().hasSequence(), Collections.<SelectStatement>emptyList(), joinTable.getStatement().getUdfParseNodes()) : NODE_FACTORY.select(joinTable.getStatement(), from, where);
    return compileSingleFlatQuery(context, select, binds, asSubquery, false, innerPlan, null, isInRowKeyOrder);
}
#location 169
#vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
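Note on the pair above: the patch swaps the three compileSingleQuery(...) calls for compileSingleFlatQuery(...), threading the already-built TupleProjector (plus inner-plan and row-key-order arguments) through explicitly instead of letting the callee re-derive that state. A minimal, hypothetical sketch of the underlying NULL_DEREFERENCE shape — none of these names are real Phoenix APIs:

    import java.util.Collections;
    import java.util.List;

    class ProjectorSketch {
        interface Projector { List<String> columns(); }
        interface Ctx { Projector lookupProjector(); } // may return null

        // Vulnerable shape: the callee re-derives state that can be absent.
        static List<String> compileBad(Ctx ctx) {
            return ctx.lookupProjector().columns(); // NPE when lookup fails
        }

        // Fixed shape: the caller passes the projector it already holds.
        static List<String> compileGood(Projector p) {
            return p == null ? Collections.<String>emptyList() : p.columns();
        }
    }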
#fixed code
@Test
public void testDeleteRowFromTableWithImmutableIndex() throws SQLException {
    testDeleteRowFromTableWithImmutableIndex(false);
}
#vulnerable code
@Test
public void testDeleteRowFromTableWithImmutableIndex() throws SQLException {
    Connection con = null;
    try {
        boolean autoCommit = false;
        con = DriverManager.getConnection(getUrl());
        con.setAutoCommit(autoCommit);
        Statement stm = con.createStatement();
        stm.execute("CREATE TABLE IF NOT EXISTS web_stats (" +
                "HOST CHAR(2) NOT NULL," +
                "DOMAIN VARCHAR NOT NULL, " +
                "FEATURE VARCHAR NOT NULL, " +
                "DATE DATE NOT NULL, \n" +
                "USAGE.CORE BIGINT," +
                "USAGE.DB BIGINT," +
                "STATS.ACTIVE_VISITOR INTEGER " +
                "CONSTRAINT PK PRIMARY KEY (HOST, DOMAIN, FEATURE, DATE)) IMMUTABLE_ROWS=true");
        stm.execute("CREATE INDEX web_stats_idx ON web_stats (DATE, FEATURE)");
        stm.close();
        Date date = new Date(0);
        PreparedStatement psInsert = con.prepareStatement("UPSERT INTO web_stats(HOST, DOMAIN, FEATURE, DATE, CORE, DB, ACTIVE_VISITOR) VALUES(?,?, ? , ?, ?, ?, ?)");
        psInsert.setString(1, "AA");
        psInsert.setString(2, "BB");
        psInsert.setString(3, "CC");
        psInsert.setDate(4, date);
        psInsert.setLong(5, 1L);
        psInsert.setLong(6, 2L);
        psInsert.setLong(7, 3);
        psInsert.execute();
        psInsert.close();
        if (!autoCommit) {
            con.commit();
        }
        psInsert = con.prepareStatement("DELETE FROM web_stats WHERE (HOST, DOMAIN, FEATURE, DATE) = (?,?,?,?)");
        psInsert.setString(1, "AA");
        psInsert.setString(2, "BB");
        psInsert.setString(3, "CC");
        psInsert.setDate(4, date);
        psInsert.execute();
        if (!autoCommit) {
            con.commit();
        }
        ResultSet rs = con.createStatement().executeQuery("SELECT /*+ NO_INDEX */ count(*) FROM web_stats");
        assertTrue(rs.next());
        assertEquals(0, rs.getLong(1));
        rs = con.createStatement().executeQuery("SELECT count(*) FROM web_stats_idx");
        assertTrue(rs.next());
        assertEquals(0, rs.getLong(1));
    } finally {
        try {
            con.close();
        } catch (Exception ex) {
        }
    }
}
#location 58
#vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
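Note on the pair above: the patch moves the test body into a parameterized helper, but the flagged defect in the vulnerable version is the classic finally-block pattern — con.close() runs even when DriverManager.getConnection threw before con was assigned. A self-contained sketch of the hazard and the try-with-resources fix:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.SQLException;

    class FinallyClose {
        static void bad(String url) throws SQLException {
            Connection con = null;
            try {
                con = DriverManager.getConnection(url); // may throw first
            } finally {
                con.close(); // NPE if the connection was never assigned
            }
        }

        static void good(String url) throws SQLException {
            try (Connection con = DriverManager.getConnection(url)) {
                // only a successfully opened connection gets closed
            }
        }
    }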
#fixed code
@Test
public void writeMetrics() throws Exception {
    Connection conn = getConnectionWithoutTracing();
    latch = new CountDownLatch(1);
    testTraceWriter.start();
    // create a simple metrics record
    long traceid = 987654;
    String description = "Some generic trace";
    long spanid = 10;
    long parentid = 11;
    long startTime = 12;
    long endTime = 13;
    String processid = "Some process";
    String annotation = "test annotation for a span";
    Span span = createNewSpan(traceid, parentid, spanid, description, startTime, endTime, processid, annotation);
    Tracer.getInstance().deliver(span);
    assertTrue("Span never committed to table", latch.await(30, TimeUnit.SECONDS));
    // make sure we only get expected stat entry (matcing the trace id), otherwise we could the
    // stats for the update as well
    TraceReader reader = new TraceReader(conn, tracingTableName);
    Collection<TraceHolder> traces = reader.readAll(10);
    assertEquals("Wrong number of traces in the tracing table", 1, traces.size());
    // validate trace
    TraceHolder trace = traces.iterator().next();
    // we are just going to get an orphan span b/c we don't send in a parent
    assertEquals("Didn't get expected orphaned spans!" + trace.orphans, 1, trace.orphans.size());
    assertEquals(traceid, trace.traceid);
    SpanInfo spanInfo = trace.orphans.get(0);
    assertEquals(description, spanInfo.description);
    assertEquals(parentid, spanInfo.getParentIdForTesting());
    assertEquals(startTime, spanInfo.start);
    assertEquals(endTime, spanInfo.end);
    assertEquals("Wrong number of tags", 0, spanInfo.tagCount);
    assertEquals("Wrong number of annotations", 1, spanInfo.annotationCount);
}
#vulnerable code
@Test
public void writeMetrics() throws Exception {
    Connection conn = getConnectionWithoutTracing();
    String tableName = generateUniqueName();
    TraceSpanReceiver traceSpanReceiver = new TraceSpanReceiver();
    latch = new CountDownLatch(1);
    testTraceWriter = new TestTraceWriter(tableName, defaultTracingThreadPoolForTest, defaultTracingBatchSizeForTest);
    // create a simple metrics record
    long traceid = 987654;
    String description = "Some generic trace";
    long spanid = 10;
    long parentid = 11;
    long startTime = 12;
    long endTime = 13;
    String processid = "Some process";
    String annotation = "test annotation for a span";
    Span span = createNewSpan(traceid, parentid, spanid, description, startTime, endTime, processid, annotation);
    traceSpanReceiver.getSpanQueue().add(span);
    assertTrue("Span never committed to table", latch.await(30, TimeUnit.SECONDS));
    // make sure we only get expected stat entry (matcing the trace id), otherwise we could the
    // stats for the update as well
    TraceReader reader = new TraceReader(conn, tableName);
    Collection<TraceHolder> traces = reader.readAll(10);
    assertEquals("Wrong number of traces in the tracing table", 1, traces.size());
    // validate trace
    TraceHolder trace = traces.iterator().next();
    // we are just going to get an orphan span b/c we don't send in a parent
    assertEquals("Didn't get expected orphaned spans!" + trace.orphans, 1, trace.orphans.size());
    assertEquals(traceid, trace.traceid);
    SpanInfo spanInfo = trace.orphans.get(0);
    assertEquals(description, spanInfo.description);
    assertEquals(parentid, spanInfo.getParentIdForTesting());
    assertEquals(startTime, spanInfo.start);
    assertEquals(endTime, spanInfo.end);
    assertEquals("Wrong number of tags", 0, spanInfo.tagCount);
    assertEquals("Wrong number of annotations", 1, spanInfo.annotationCount);
}
#location 23
#vulnerability type RESOURCE_LEAK
Below is the vulnerable code, please generate the patch based on the following information.
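Note on the pair above: the vulnerable test constructs a fresh TestTraceWriter (and a TraceSpanReceiver) that is never started or shut down, which the analyzer reports as RESOURCE_LEAK; the fix reuses a fixture-managed writer and delivers the span through the Tracer. A JDK-only equivalent of the leak pattern:

    import java.io.FileWriter;
    import java.io.IOException;

    class WriterLifecycle {
        static void bad(String path) throws IOException {
            FileWriter w = new FileWriter(path); // opened here...
            w.write("span");                     // ...never closed on any path
        }

        static void good(String path) throws IOException {
            try (FileWriter w = new FileWriter(path)) { // closed automatically
                w.write("span");
            }
        }
    }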
#fixed code
@Override
public boolean next(List<Cell> results) throws IOException {
    if (indexRowKey != null && singleRowRebuildReturnCode == GlobalIndexChecker.RebuildReturnCode.NO_DATA_ROW.getValue()) {
        byte[] rowCountBytes = PLong.INSTANCE.toBytes(Long.valueOf(singleRowRebuildReturnCode));
        final Cell aggKeyValue = PhoenixKeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, rowCountBytes, 0, rowCountBytes.length);
        results.add(aggKeyValue);
        return false;
    }
    Cell lastCell = null;
    int rowCount = 0;
    region.startRegionOperation();
    RegionScanner localScanner = null;
    try {
        byte[] uuidValue = ServerCacheClient.generateId();
        localScanner = getLocalScanner();
        if (localScanner == null) {
            return false;
        }
        synchronized (localScanner) {
            if (!shouldVerify()) {
                skipped = true;
                return false;
            }
            do {
                /**
                 * If region is closing and there are large number of rows being verified/rebuilt with IndexTool,
                 * not having this check will impact/delay the region closing -- affecting the availability
                 * as this method holds the read lock on the region.
                 */
                ungroupedAggregateRegionObserver.checkForRegionClosingOrSplitting();
                List<Cell> row = new ArrayList<Cell>();
                hasMore = localScanner.nextRaw(row);
                if (!row.isEmpty()) {
                    lastCell = row.get(0); // lastCell is any cell from the last visited row
                    Put put = null;
                    Delete del = null;
                    for (Cell cell : row) {
                        if (KeyValue.Type.codeToType(cell.getTypeByte()) == KeyValue.Type.Put) {
                            if (!partialRebuild && familyMap != null && !isColumnIncluded(cell)) {
                                continue;
                            }
                            if (put == null) {
                                put = new Put(CellUtil.cloneRow(cell));
                            }
                            put.add(cell);
                        } else {
                            if (del == null) {
                                del = new Delete(CellUtil.cloneRow(cell));
                            }
                            del.addDeleteMarker(cell);
                        }
                    }
                    if (put == null && del == null) {
                        continue;
                    }
                    // Always add the put first and then delete for a given row. This simplifies the logic in
                    // IndexRegionObserver
                    if (put != null) {
                        mutations.add(put);
                    }
                    if (del != null) {
                        mutations.add(del);
                    }
                    if (!verify) {
                        if (put != null) {
                            setMutationAttributes(put, uuidValue);
                        }
                        if (del != null) {
                            setMutationAttributes(del, uuidValue);
                        }
                        uuidValue = commitIfReady(uuidValue, mutations);
                    } else {
                        byte[] dataKey = (put != null) ? put.getRow() : del.getRow();
                        prepareIndexMutations(put, del);
                        dataKeyToMutationMap.put(dataKey, new Pair<Put, Delete>(put, del));
                    }
                    rowCount++;
                }
            } while (hasMore && rowCount < pageSizeInRows);
            if (!mutations.isEmpty()) {
                if (verify) {
                    verifyAndOrRebuildIndex();
                } else {
                    ungroupedAggregateRegionObserver.checkForRegionClosingOrSplitting();
                    ungroupedAggregateRegionObserver.commitBatchWithRetries(region, mutations, blockingMemstoreSize);
                }
            }
        }
    } catch (Throwable e) {
        LOGGER.error("Exception in IndexRebuildRegionScanner for region " + region.getRegionInfo().getRegionNameAsString(), e);
        throw e;
    } finally {
        region.closeRegionOperation();
        mutations.clear();
        if (verify) {
            dataKeyToMutationMap.clear();
            indexKeyToMutationMap.clear();
        }
        if (localScanner != null && localScanner != innerScanner) {
            localScanner.close();
        }
    }
    if (indexRowKey != null) {
        rowCount = singleRowRebuildReturnCode;
    }
    if (minTimestamp != 0) {
        nextStartKey = ByteUtil.calculateTheClosestNextRowKeyForPrefix(CellUtil.cloneRow(lastCell));
    }
    byte[] rowCountBytes = PLong.INSTANCE.toBytes(Long.valueOf(rowCount));
    final Cell aggKeyValue;
    if (lastCell == null) {
        aggKeyValue = PhoenixKeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, rowCountBytes, 0, rowCountBytes.length);
    } else {
        aggKeyValue = PhoenixKeyValueUtil.newKeyValue(CellUtil.cloneRow(lastCell), SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, rowCountBytes, 0, rowCountBytes.length);
    }
    results.add(aggKeyValue);
    return hasMore || hasMoreIncr;
}
#vulnerable code
@Override
public boolean next(List<Cell> results) throws IOException {
    if (indexRowKey != null && singleRowRebuildReturnCode == GlobalIndexChecker.RebuildReturnCode.NO_DATA_ROW.getValue()) {
        byte[] rowCountBytes = PLong.INSTANCE.toBytes(Long.valueOf(singleRowRebuildReturnCode));
        final Cell aggKeyValue = PhoenixKeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, rowCountBytes, 0, rowCountBytes.length);
        results.add(aggKeyValue);
        return false;
    }
    Cell lastCell = null;
    int rowCount = 0;
    region.startRegionOperation();
    RegionScanner localScanner = null;
    try {
        byte[] uuidValue = ServerCacheClient.generateId();
        localScanner = getLocalScanner();
        if (localScanner == null) {
            return false;
        }
        synchronized (localScanner) {
            if (!shouldVerify()) {
                skipped = true;
                return false;
            }
            do {
                List<Cell> row = new ArrayList<Cell>();
                hasMore = localScanner.nextRaw(row);
                if (!row.isEmpty()) {
                    lastCell = row.get(0); // lastCell is any cell from the last visited row
                    Put put = null;
                    Delete del = null;
                    for (Cell cell : row) {
                        if (KeyValue.Type.codeToType(cell.getTypeByte()) == KeyValue.Type.Put) {
                            if (!partialRebuild && familyMap != null && !isColumnIncluded(cell)) {
                                continue;
                            }
                            if (put == null) {
                                put = new Put(CellUtil.cloneRow(cell));
                            }
                            put.add(cell);
                        } else {
                            if (del == null) {
                                del = new Delete(CellUtil.cloneRow(cell));
                            }
                            del.addDeleteMarker(cell);
                        }
                    }
                    if (put == null && del == null) {
                        continue;
                    }
                    // Always add the put first and then delete for a given row. This simplifies the logic in
                    // IndexRegionObserver
                    if (put != null) {
                        mutations.add(put);
                    }
                    if (del != null) {
                        mutations.add(del);
                    }
                    if (!verify) {
                        if (put != null) {
                            setMutationAttributes(put, uuidValue);
                        }
                        if (del != null) {
                            setMutationAttributes(del, uuidValue);
                        }
                        uuidValue = commitIfReady(uuidValue, mutations);
                    } else {
                        byte[] dataKey = (put != null) ? put.getRow() : del.getRow();
                        prepareIndexMutations(put, del);
                        dataKeyToMutationMap.put(dataKey, new Pair<Put, Delete>(put, del));
                    }
                    rowCount++;
                }
            } while (hasMore && rowCount < pageSizeInRows);
            if (!mutations.isEmpty()) {
                if (verify) {
                    verifyAndOrRebuildIndex();
                } else {
                    ungroupedAggregateRegionObserver.checkForRegionClosingOrSplitting();
                    ungroupedAggregateRegionObserver.commitBatchWithRetries(region, mutations, blockingMemstoreSize);
                }
            }
        }
    } catch (Throwable e) {
        LOGGER.error("Exception in IndexRebuildRegionScanner for region " + region.getRegionInfo().getRegionNameAsString(), e);
        throw e;
    } finally {
        region.closeRegionOperation();
        mutations.clear();
        if (verify) {
            dataKeyToMutationMap.clear();
            indexKeyToMutationMap.clear();
        }
        if (localScanner != null && localScanner != innerScanner) {
            localScanner.close();
        }
    }
    if (indexRowKey != null) {
        rowCount = singleRowRebuildReturnCode;
    }
    if (minTimestamp != 0) {
        nextStartKey = ByteUtil.calculateTheClosestNextRowKeyForPrefix(CellUtil.cloneRow(lastCell));
    }
    byte[] rowCountBytes = PLong.INSTANCE.toBytes(Long.valueOf(rowCount));
    final Cell aggKeyValue;
    if (lastCell == null) {
        aggKeyValue = PhoenixKeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, rowCountBytes, 0, rowCountBytes.length);
    } else {
        aggKeyValue = PhoenixKeyValueUtil.newKeyValue(CellUtil.cloneRow(lastCell), SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, rowCountBytes, 0, rowCountBytes.length);
    }
    results.add(aggKeyValue);
    return hasMore || hasMoreIncr;
}
#location 94
#vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code, please generate the patch based on the following information.
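Note on the pair above: the only change is a per-iteration checkForRegionClosingOrSplitting() call inside the scan loop, with a comment explaining why — the method holds the region read lock, so a long rebuild without the check can block a region close indefinitely. A minimal sketch of that cooperative-cancellation shape, using plain JDK locks (names are illustrative, not HBase APIs):

    import java.util.concurrent.atomic.AtomicBoolean;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    class CooperativeScan {
        private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
        private final AtomicBoolean closing = new AtomicBoolean();

        void scan(Iterable<byte[]> rows) {
            lock.readLock().lock();
            try {
                for (byte[] row : rows) {
                    // Without this per-iteration check, a long scan holds the
                    // read lock and starves a writer (e.g. a region close).
                    if (closing.get()) {
                        throw new IllegalStateException("region is closing");
                    }
                    process(row);
                }
            } finally {
                lock.readLock().unlock();
            }
        }

        private void process(byte[] row) { /* per-row work */ }
    }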
#fixed code
public byte[] buildRowKey(ValueGetter valueGetter, ImmutableBytesWritable rowKeyPtr, byte[] regionStartKey, byte[] regionEndKey, long ts) {
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    boolean prependRegionStartKey = isLocalIndex && regionStartKey != null;
    boolean isIndexSalted = !isLocalIndex && nIndexSaltBuckets > 0;
    int prefixKeyLength = prependRegionStartKey ? (regionStartKey.length != 0 ? regionStartKey.length : regionEndKey.length) : 0;
    TrustedByteArrayOutputStream stream = new TrustedByteArrayOutputStream(estimatedIndexRowKeyBytes + (prependRegionStartKey ? prefixKeyLength : 0));
    DataOutput output = new DataOutputStream(stream);
    try {
        // For local indexes, we must prepend the row key with the start region key
        if (prependRegionStartKey) {
            if (regionStartKey.length == 0) {
                output.write(new byte[prefixKeyLength]);
            } else {
                output.write(regionStartKey);
            }
        }
        if (isIndexSalted) {
            output.write(0); // will be set at end to index salt byte
        }
        // The dataRowKeySchema includes the salt byte field,
        // so we must adjust for that here.
        int dataPosOffset = isDataTableSalted ? 1 : 0;
        BitSet viewConstantColumnBitSet = this.rowKeyMetaData.getViewConstantColumnBitSet();
        int nIndexedColumns = getIndexPkColumnCount() - getNumViewConstants();
        int[][] dataRowKeyLocator = new int[2][nIndexedColumns];
        // Skip data table salt byte
        int maxRowKeyOffset = rowKeyPtr.getOffset() + rowKeyPtr.getLength();
        dataRowKeySchema.iterator(rowKeyPtr, ptr, dataPosOffset);
        if (viewIndexId != null) {
            output.write(viewIndexId);
        }
        if (isMultiTenant) {
            dataRowKeySchema.next(ptr, dataPosOffset, maxRowKeyOffset);
            output.write(ptr.get(), ptr.getOffset(), ptr.getLength());
            if (!dataRowKeySchema.getField(dataPosOffset).getDataType().isFixedWidth()) {
                output.writeByte(SchemaUtil.getSeparatorByte(rowKeyOrderOptimizable, ptr.getLength() == 0, dataRowKeySchema.getField(dataPosOffset)));
            }
            dataPosOffset++;
        }
        // Write index row key
        for (int i = dataPosOffset; i < indexDataColumnCount; i++) {
            Boolean hasValue = dataRowKeySchema.next(ptr, i, maxRowKeyOffset);
            // Ignore view constants from the data table, as these
            // don't need to appear in the index (as they're the
            // same for all rows in this index)
            if (!viewConstantColumnBitSet.get(i) || isIndexOnBaseTable()) {
                int pos = rowKeyMetaData.getIndexPkPosition(i - dataPosOffset);
                if (Boolean.TRUE.equals(hasValue)) {
                    dataRowKeyLocator[0][pos] = ptr.getOffset();
                    dataRowKeyLocator[1][pos] = ptr.getLength();
                } else {
                    dataRowKeyLocator[0][pos] = 0;
                    dataRowKeyLocator[1][pos] = 0;
                }
            }
        }
        BitSet descIndexColumnBitSet = rowKeyMetaData.getDescIndexColumnBitSet();
        Iterator<Expression> expressionIterator = indexedExpressions.iterator();
        for (int i = 0; i < nIndexedColumns; i++) {
            PDataType dataColumnType;
            boolean isNullable;
            SortOrder dataSortOrder;
            if (dataPkPosition[i] == EXPRESSION_NOT_PRESENT) {
                Expression expression = expressionIterator.next();
                dataColumnType = expression.getDataType();
                dataSortOrder = expression.getSortOrder();
                isNullable = expression.isNullable();
                expression.evaluate(new ValueGetterTuple(valueGetter, ts), ptr);
            } else {
                Field field = dataRowKeySchema.getField(dataPkPosition[i]);
                dataColumnType = field.getDataType();
                ptr.set(rowKeyPtr.get(), dataRowKeyLocator[0][i], dataRowKeyLocator[1][i]);
                dataSortOrder = field.getSortOrder();
                isNullable = field.isNullable();
            }
            boolean isDataColumnInverted = dataSortOrder != SortOrder.ASC;
            PDataType indexColumnType = IndexUtil.getIndexColumnDataType(isNullable, dataColumnType);
            boolean isBytesComparable = dataColumnType.isBytesComparableWith(indexColumnType);
            boolean isIndexColumnDesc = descIndexColumnBitSet.get(i);
            if (isBytesComparable && isDataColumnInverted == isIndexColumnDesc) {
                output.write(ptr.get(), ptr.getOffset(), ptr.getLength());
            } else {
                if (!isBytesComparable) {
                    indexColumnType.coerceBytes(ptr, dataColumnType, dataSortOrder, SortOrder.getDefault());
                }
                if (isDataColumnInverted != isIndexColumnDesc) {
                    writeInverted(ptr.get(), ptr.getOffset(), ptr.getLength(), output);
                } else {
                    output.write(ptr.get(), ptr.getOffset(), ptr.getLength());
                }
            }
            if (!indexColumnType.isFixedWidth()) {
                output.writeByte(SchemaUtil.getSeparatorByte(rowKeyOrderOptimizable, ptr.getLength() == 0, isIndexColumnDesc ? SortOrder.DESC : SortOrder.ASC));
            }
        }
        int length = stream.size();
        int minLength = length - maxTrailingNulls;
        byte[] indexRowKey = stream.getBuffer();
        // Remove trailing nulls
        while (length > minLength && indexRowKey[length - 1] == QueryConstants.SEPARATOR_BYTE) {
            length--;
        }
        if (isIndexSalted) {
            // Set salt byte
            byte saltByte = SaltingUtil.getSaltingByte(indexRowKey, SaltingUtil.NUM_SALTING_BYTES, length - SaltingUtil.NUM_SALTING_BYTES, nIndexSaltBuckets);
            indexRowKey[0] = saltByte;
        }
        return indexRowKey.length == length ? indexRowKey : Arrays.copyOf(indexRowKey, length);
    } catch (IOException e) {
        throw new RuntimeException(e); // Impossible
    } finally {
        try {
            stream.close();
        } catch (IOException e) {
            throw new RuntimeException(e); // Impossible
        }
    }
}
#vulnerable code
public byte[] buildRowKey(ValueGetter valueGetter, ImmutableBytesWritable rowKeyPtr, byte[] regionStartKey, byte[] regionEndKey, long ts) {
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    boolean prependRegionStartKey = isLocalIndex && regionStartKey != null;
    boolean isIndexSalted = !isLocalIndex && nIndexSaltBuckets > 0;
    int prefixKeyLength = prependRegionStartKey ? (regionStartKey.length != 0 ? regionStartKey.length : regionEndKey.length) : 0;
    TrustedByteArrayOutputStream stream = new TrustedByteArrayOutputStream(estimatedIndexRowKeyBytes + (prependRegionStartKey ? prefixKeyLength : 0));
    DataOutput output = new DataOutputStream(stream);
    try {
        // For local indexes, we must prepend the row key with the start region key
        if (prependRegionStartKey) {
            if (regionStartKey.length == 0) {
                output.write(new byte[prefixKeyLength]);
            } else {
                output.write(regionStartKey);
            }
        }
        if (isIndexSalted) {
            output.write(0); // will be set at end to index salt byte
        }
        // The dataRowKeySchema includes the salt byte field,
        // so we must adjust for that here.
        int dataPosOffset = isDataTableSalted ? 1 : 0;
        BitSet viewConstantColumnBitSet = this.rowKeyMetaData.getViewConstantColumnBitSet();
        int nIndexedColumns = getIndexPkColumnCount() - getNumViewConstants();
        int[][] dataRowKeyLocator = new int[2][nIndexedColumns];
        // Skip data table salt byte
        int maxRowKeyOffset = rowKeyPtr.getOffset() + rowKeyPtr.getLength();
        dataRowKeySchema.iterator(rowKeyPtr, ptr, dataPosOffset);
        if (viewIndexId != null) {
            output.write(viewIndexId);
        }
        if (isMultiTenant) {
            dataRowKeySchema.next(ptr, dataPosOffset, maxRowKeyOffset);
            output.write(ptr.get(), ptr.getOffset(), ptr.getLength());
            if (!dataRowKeySchema.getField(dataPosOffset).getDataType().isFixedWidth()) {
                output.writeByte(SchemaUtil.getSeparatorByte(rowKeyOrderOptimizable, ptr.getLength() == 0, dataRowKeySchema.getField(dataPosOffset)));
            }
            dataPosOffset++;
        }
        // Write index row key
        for (int i = dataPosOffset; i < indexDataColumnCount; i++) {
            Boolean hasValue = dataRowKeySchema.next(ptr, i, maxRowKeyOffset);
            // Ignore view constants from the data table, as these
            // don't need to appear in the index (as they're the
            // same for all rows in this index)
            if (!viewConstantColumnBitSet.get(i)) {
                int pos = rowKeyMetaData.getIndexPkPosition(i - dataPosOffset);
                if (Boolean.TRUE.equals(hasValue)) {
                    dataRowKeyLocator[0][pos] = ptr.getOffset();
                    dataRowKeyLocator[1][pos] = ptr.getLength();
                } else {
                    dataRowKeyLocator[0][pos] = 0;
                    dataRowKeyLocator[1][pos] = 0;
                }
            }
        }
        BitSet descIndexColumnBitSet = rowKeyMetaData.getDescIndexColumnBitSet();
        Iterator<Expression> expressionIterator = indexedExpressions.iterator();
        for (int i = 0; i < nIndexedColumns; i++) {
            PDataType dataColumnType;
            boolean isNullable;
            SortOrder dataSortOrder;
            if (dataPkPosition[i] == EXPRESSION_NOT_PRESENT) {
                Expression expression = expressionIterator.next();
                dataColumnType = expression.getDataType();
                dataSortOrder = expression.getSortOrder();
                isNullable = expression.isNullable();
                expression.evaluate(new ValueGetterTuple(valueGetter, ts), ptr);
            } else {
                Field field = dataRowKeySchema.getField(dataPkPosition[i]);
                dataColumnType = field.getDataType();
                ptr.set(rowKeyPtr.get(), dataRowKeyLocator[0][i], dataRowKeyLocator[1][i]);
                dataSortOrder = field.getSortOrder();
                isNullable = field.isNullable();
            }
            boolean isDataColumnInverted = dataSortOrder != SortOrder.ASC;
            PDataType indexColumnType = IndexUtil.getIndexColumnDataType(isNullable, dataColumnType);
            boolean isBytesComparable = dataColumnType.isBytesComparableWith(indexColumnType);
            boolean isIndexColumnDesc = descIndexColumnBitSet.get(i);
            if (isBytesComparable && isDataColumnInverted == isIndexColumnDesc) {
                output.write(ptr.get(), ptr.getOffset(), ptr.getLength());
            } else {
                if (!isBytesComparable) {
                    indexColumnType.coerceBytes(ptr, dataColumnType, dataSortOrder, SortOrder.getDefault());
                }
                if (isDataColumnInverted != isIndexColumnDesc) {
                    writeInverted(ptr.get(), ptr.getOffset(), ptr.getLength(), output);
                } else {
                    output.write(ptr.get(), ptr.getOffset(), ptr.getLength());
                }
            }
            if (!indexColumnType.isFixedWidth()) {
                output.writeByte(SchemaUtil.getSeparatorByte(rowKeyOrderOptimizable, ptr.getLength() == 0, isIndexColumnDesc ? SortOrder.DESC : SortOrder.ASC));
            }
        }
        int length = stream.size();
        int minLength = length - maxTrailingNulls;
        byte[] indexRowKey = stream.getBuffer();
        // Remove trailing nulls
        while (length > minLength && indexRowKey[length - 1] == QueryConstants.SEPARATOR_BYTE) {
            length--;
        }
        if (isIndexSalted) {
            // Set salt byte
            byte saltByte = SaltingUtil.getSaltingByte(indexRowKey, SaltingUtil.NUM_SALTING_BYTES, length - SaltingUtil.NUM_SALTING_BYTES, nIndexSaltBuckets);
            indexRowKey[0] = saltByte;
        }
        return indexRowKey.length == length ? indexRowKey : Arrays.copyOf(indexRowKey, length);
    } catch (IOException e) {
        throw new RuntimeException(e); // Impossible
    } finally {
        try {
            stream.close();
        } catch (IOException e) {
            throw new RuntimeException(e); // Impossible
        }
    }
}
#location 114
#vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
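Note on the pair above: the one-token fix widens the guard to `!viewConstantColumnBitSet.get(i) || isIndexOnBaseTable()`, so view-constant PK columns are no longer skipped when the index sits on the base table; a skipped column leaves its dataRowKeyLocator slot unpopulated for the second loop. A hypothetical, JDK-only sketch of that too-narrow-fill-guard shape:

    class LocatorSketch {
        static int totalLength(boolean[] skip, String[] src) {
            String[] parts = new String[src.length];
            for (int i = 0; i < src.length; i++) {
                if (!skip[i]) {      // too-narrow guard: skipped slots stay null
                    parts[i] = src[i];
                }
            }
            int n = 0;
            for (String p : parts) {
                n += p.length();     // NPE on any slot the fill pass skipped
            }
            return n;
        }
    }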
#fixed code @Test public void testCSVCommonsUpsert_WithArray() throws Exception { CSVParser parser = null; PhoenixConnection conn = null; try { // Create table String statements = "CREATE TABLE IF NOT EXISTS ARRAY_TABLE " + "(ID BIGINT NOT NULL PRIMARY KEY, VALARRAY INTEGER ARRAY);"; conn = DriverManager.getConnection(getUrl()).unwrap( PhoenixConnection.class); PhoenixRuntime.executeStatements(conn, new StringReader(statements), null); // Upsert CSV file CSVCommonsLoader csvUtil = new CSVCommonsLoader(conn, "ARRAY_TABLE", ImmutableList.<String>of(), true, ',', '"', null, "!"); csvUtil.upsert( new StringReader("ID,VALARRAY\n" + "1,2!3!4\n")); // Compare Phoenix ResultSet with CSV file content PreparedStatement statement = conn .prepareStatement("SELECT ID, VALARRAY FROM ARRAY_TABLE"); ResultSet phoenixResultSet = statement.executeQuery(); assertTrue(phoenixResultSet.next()); assertEquals(1L, phoenixResultSet.getLong(1)); assertEquals( PArrayDataType.instantiatePhoenixArray(PInteger.INSTANCE, new Integer[]{2, 3, 4}), phoenixResultSet.getArray(2)); assertFalse(phoenixResultSet.next()); } finally { if (parser != null) parser.close(); if (conn != null) conn.close(); } }
#vulnerable code
@Test
public void testCSVCommonsUpsert_WithArray() throws Exception {
    CSVParser parser = null;
    PhoenixConnection conn = null;
    try {
        // Create table
        String statements = "CREATE TABLE IF NOT EXISTS ARRAY_TABLE " + "(ID BIGINT NOT NULL PRIMARY KEY, VALARRAY INTEGER ARRAY);";
        conn = DriverManager.getConnection(getUrl()).unwrap(PhoenixConnection.class);
        PhoenixRuntime.executeStatements(conn, new StringReader(statements), null);
        // Upsert CSV file
        CSVCommonsLoader csvUtil = new CSVCommonsLoader(conn, "ARRAY_TABLE", null, true, ',', '"', null, "!");
        csvUtil.upsert(new StringReader("ID,VALARRAY\n" + "1,2!3!4\n"));
        // Compare Phoenix ResultSet with CSV file content
        PreparedStatement statement = conn.prepareStatement("SELECT ID, VALARRAY FROM ARRAY_TABLE");
        ResultSet phoenixResultSet = statement.executeQuery();
        assertTrue(phoenixResultSet.next());
        assertEquals(1L, phoenixResultSet.getLong(1));
        assertEquals(PArrayDataType.instantiatePhoenixArray(PInteger.INSTANCE, new Integer[]{2, 3, 4}), phoenixResultSet.getArray(2));
        assertFalse(phoenixResultSet.next());
    } finally {
        if (parser != null) parser.close();
        if (conn != null) conn.close();
    }
}
#location 18
#vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
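Note on the pair above: the fix is purely at the call site — ImmutableList.<String>of() replaces a literal null for the loader's column list, so the callee never dereferences a null collection. A minimal sketch of the pattern:

    import java.util.Collections;
    import java.util.List;

    class NullArg {
        // Vulnerable shape: a null collection argument that the callee uses.
        static int consume(List<String> columns) {
            return columns.size();          // NPE when callers pass null
        }

        // Fixed shape at the call site: pass an explicit empty list instead.
        static int caller() {
            return consume(Collections.<String>emptyList());
        }
    }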
#fixed code
private String makeQueryString(JobConf jobConf, String tableName, List<String> readColumnList, String whereClause, String queryTemplate, String hints, Map<String, TypeInfo> columnTypeMap) throws IOException {
    StringBuilder sql = new StringBuilder();
    List<String> conditionColumnList = buildWhereClause(jobConf, sql, whereClause, columnTypeMap);
    readColumnList = replaceColumns(jobConf, readColumnList);
    if (conditionColumnList.size() > 0) {
        addConditionColumnToReadColumn(readColumnList, conditionColumnList);
        sql.insert(0, queryTemplate.replace("$HINT$", hints).replace("$COLUMN_LIST$", getSelectColumns(jobConf, tableName, readColumnList)).replace("$TABLE_NAME$", tableName));
    } else {
        sql.append(queryTemplate.replace("$HINT$", hints).replace("$COLUMN_LIST$", getSelectColumns(jobConf, tableName, readColumnList)).replace("$TABLE_NAME$", tableName));
    }
    if (LOG.isInfoEnabled()) {
        LOG.info("Input query : " + sql.toString());
    }
    return sql.toString();
}
#vulnerable code
private String makeQueryString(JobConf jobConf, String tableName, List<String> readColumnList, String whereClause, String queryTemplate, String hints, Map<String, TypeInfo> columnTypeMap) throws IOException {
    StringBuilder sql = new StringBuilder();
    List<String> conditionColumnList = buildWhereClause(jobConf, sql, whereClause, columnTypeMap);
    readColumnList = replaceColumns(jobConf, readColumnList);
    if (conditionColumnList.size() > 0) {
        addConditionColumnToReadColumn(readColumnList, conditionColumnList);
        readColumnList = ColumnMappingUtils.quoteColumns(readColumnList);
        sql.insert(0, queryTemplate.replace("$HINT$", hints).replace("$COLUMN_LIST$", getSelectColumns(jobConf, tableName, readColumnList)).replace("$TABLE_NAME$", tableName));
    } else {
        readColumnList = ColumnMappingUtils.quoteColumns(readColumnList);
        sql.append(queryTemplate.replace("$HINT$", hints).replace("$COLUMN_LIST$", getSelectColumns(jobConf, tableName, readColumnList)).replace("$TABLE_NAME$", tableName));
    }
    if (LOG.isInfoEnabled()) {
        LOG.info("Input query : " + sql.toString());
    }
    return sql.toString();
}
#location 10
#vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
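Note on the pair above: the fix deletes the `readColumnList = ColumnMappingUtils.quoteColumns(readColumnList)` reassignments in both branches; the flagged NULL_DEREFERENCE is the general hazard of piping a list through a helper that may hand back null. A hypothetical sketch (Quoter is not a real class here):

    import java.util.List;

    class Reassign {
        interface Quoter { List<String> quote(List<String> in); } // may return null

        // Vulnerable shape: the helper's possibly-null result feeds the next use.
        static String bad(List<String> cols, Quoter q) {
            cols = q.quote(cols);
            return String.join(",", cols);  // NPE when quote() returned null
        }

        // Fixed shape: drop the risky reassignment and use the input directly.
        static String good(List<String> cols) {
            return String.join(",", cols);
        }
    }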
#fixed code
private void initTxServiceClient() {
    txZKClientService = TransactionFactory.getTransactionProvider().getTransactionContext().setTransactionClient(config, props, connectionInfo);
}
#vulnerable code
private void initTxServiceClient() {
    txZKClientService = TransactionFactory.getTransactionFactory().getTransactionContext().setTransactionClient(config, props, connectionInfo);
}
#location 2
#vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
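Note on the pair above: the fix swaps getTransactionFactory() for getTransactionProvider() at the head of a multi-hop static accessor chain; with a chain like that, any null hop trips an NPE with no hint of which link failed. A hypothetical sketch of splitting such a chain defensively:

    class ChainNpe {
        interface Factory { Context context(); }
        interface Context { Object client(); }

        // Vulnerable shape: one dotted chain, any hop may be null.
        static Object bad(Factory f) {
            return f.context().client();    // NPE if context() returns null
        }

        // Safer shape: break the chain so each hop can be checked.
        static Object good(Factory f) {
            Context c = f.context();
            return c == null ? null : c.client();
        }
    }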
#fixed code
@Test
public void testUpsertWithOldClient() throws Exception {
    // Insert data with old client and read with new client
    executeQueryWithClientVersion(compatibleClientVersion, CREATE_ADD);
    executeQueriesWithCurrentVersion(QUERY);
    assertExpectedOutput(CREATE_ADD, QUERY);
}
#vulnerable code
@Test
public void testUpsertWithOldClient() throws Exception {
    checkForPreConditions();
    // Insert data with old client and read with new client
    executeQueryWithClientVersion(compatibleClientVersion, CREATE_ADD);
    executeQueriesWithCurrentVersion(QUERY);
    assertTrue(compareOutput(CREATE_ADD, QUERY));
}
#location 3
#vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code, please generate the patch based on the following information.
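Note on the pair above: the fix drops the checkForPreConditions() call (the line flagged as a THREAD_SAFETY_VIOLATION) and swaps the boolean compareOutput assertion for assertExpectedOutput, which fails with a diff rather than a bare false. Presumably the precondition check touched state shared across concurrently running tests; a sketch of that hazard:

    class SharedTestState {
        static String clientVersion;        // shared across test threads

        static void badPrecondition(String v) {
            clientVersion = v;              // racy write from parallel tests
        }

        static void goodPrecondition(String v) {
            // read-only checks, or per-test (non-static) state, avoid the race
            if (v == null) throw new IllegalStateException("version not set");
        }
    }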
#fixed code
@Override
public boolean next(List<Cell> results) throws IOException {
    if (indexRowKey != null && singleRowRebuildReturnCode == GlobalIndexChecker.RebuildReturnCode.NO_DATA_ROW.getValue()) {
        byte[] rowCountBytes = PLong.INSTANCE.toBytes(Long.valueOf(singleRowRebuildReturnCode));
        final Cell aggKeyValue = PhoenixKeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, rowCountBytes, 0, rowCountBytes.length);
        results.add(aggKeyValue);
        return false;
    }
    Cell lastCell = null;
    int rowCount = 0;
    region.startRegionOperation();
    RegionScanner localScanner = null;
    try {
        byte[] uuidValue = ServerCacheClient.generateId();
        localScanner = getLocalScanner();
        if (localScanner == null) {
            return false;
        }
        synchronized (localScanner) {
            if (!shouldVerify()) {
                skipped = true;
                return false;
            }
            do {
                /**
                 * If region is closing and there are large number of rows being verified/rebuilt with IndexTool,
                 * not having this check will impact/delay the region closing -- affecting the availability
                 * as this method holds the read lock on the region.
                 */
                ungroupedAggregateRegionObserver.checkForRegionClosingOrSplitting();
                List<Cell> row = new ArrayList<Cell>();
                hasMore = localScanner.nextRaw(row);
                if (!row.isEmpty()) {
                    lastCell = row.get(0); // lastCell is any cell from the last visited row
                    Put put = null;
                    Delete del = null;
                    for (Cell cell : row) {
                        if (KeyValue.Type.codeToType(cell.getTypeByte()) == KeyValue.Type.Put) {
                            if (!partialRebuild && familyMap != null && !isColumnIncluded(cell)) {
                                continue;
                            }
                            if (put == null) {
                                put = new Put(CellUtil.cloneRow(cell));
                            }
                            put.add(cell);
                        } else {
                            if (del == null) {
                                del = new Delete(CellUtil.cloneRow(cell));
                            }
                            del.addDeleteMarker(cell);
                        }
                    }
                    if (put == null && del == null) {
                        continue;
                    }
                    // Always add the put first and then delete for a given row. This simplifies the logic in
                    // IndexRegionObserver
                    if (put != null) {
                        mutations.add(put);
                    }
                    if (del != null) {
                        mutations.add(del);
                    }
                    if (!verify) {
                        if (put != null) {
                            setMutationAttributes(put, uuidValue);
                        }
                        if (del != null) {
                            setMutationAttributes(del, uuidValue);
                        }
                        uuidValue = commitIfReady(uuidValue, mutations);
                    } else {
                        byte[] dataKey = (put != null) ? put.getRow() : del.getRow();
                        prepareIndexMutations(put, del);
                        dataKeyToMutationMap.put(dataKey, new Pair<Put, Delete>(put, del));
                    }
                    rowCount++;
                }
            } while (hasMore && rowCount < pageSizeInRows);
            if (!mutations.isEmpty()) {
                if (verify) {
                    verifyAndOrRebuildIndex();
                } else {
                    ungroupedAggregateRegionObserver.checkForRegionClosingOrSplitting();
                    ungroupedAggregateRegionObserver.commitBatchWithRetries(region, mutations, blockingMemstoreSize);
                }
            }
        }
    } catch (Throwable e) {
        LOGGER.error("Exception in IndexRebuildRegionScanner for region " + region.getRegionInfo().getRegionNameAsString(), e);
        throw e;
    } finally {
        region.closeRegionOperation();
        mutations.clear();
        if (verify) {
            dataKeyToMutationMap.clear();
            indexKeyToMutationMap.clear();
        }
        if (localScanner != null && localScanner != innerScanner) {
            localScanner.close();
        }
    }
    if (indexRowKey != null) {
        rowCount = singleRowRebuildReturnCode;
    }
    if (minTimestamp != 0) {
        nextStartKey = ByteUtil.calculateTheClosestNextRowKeyForPrefix(CellUtil.cloneRow(lastCell));
    }
    byte[] rowCountBytes = PLong.INSTANCE.toBytes(Long.valueOf(rowCount));
    final Cell aggKeyValue;
    if (lastCell == null) {
        aggKeyValue = PhoenixKeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, rowCountBytes, 0, rowCountBytes.length);
    } else {
        aggKeyValue = PhoenixKeyValueUtil.newKeyValue(CellUtil.cloneRow(lastCell), SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, rowCountBytes, 0, rowCountBytes.length);
    }
    results.add(aggKeyValue);
    return hasMore || hasMoreIncr;
}
#vulnerable code
@Override
public boolean next(List<Cell> results) throws IOException {
    if (indexRowKey != null && singleRowRebuildReturnCode == GlobalIndexChecker.RebuildReturnCode.NO_DATA_ROW.getValue()) {
        byte[] rowCountBytes = PLong.INSTANCE.toBytes(Long.valueOf(singleRowRebuildReturnCode));
        final Cell aggKeyValue = PhoenixKeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, rowCountBytes, 0, rowCountBytes.length);
        results.add(aggKeyValue);
        return false;
    }
    Cell lastCell = null;
    int rowCount = 0;
    region.startRegionOperation();
    RegionScanner localScanner = null;
    try {
        byte[] uuidValue = ServerCacheClient.generateId();
        localScanner = getLocalScanner();
        if (localScanner == null) {
            return false;
        }
        synchronized (localScanner) {
            if (!shouldVerify()) {
                skipped = true;
                return false;
            }
            do {
                List<Cell> row = new ArrayList<Cell>();
                hasMore = localScanner.nextRaw(row);
                if (!row.isEmpty()) {
                    lastCell = row.get(0); // lastCell is any cell from the last visited row
                    Put put = null;
                    Delete del = null;
                    for (Cell cell : row) {
                        if (KeyValue.Type.codeToType(cell.getTypeByte()) == KeyValue.Type.Put) {
                            if (!partialRebuild && familyMap != null && !isColumnIncluded(cell)) {
                                continue;
                            }
                            if (put == null) {
                                put = new Put(CellUtil.cloneRow(cell));
                            }
                            put.add(cell);
                        } else {
                            if (del == null) {
                                del = new Delete(CellUtil.cloneRow(cell));
                            }
                            del.addDeleteMarker(cell);
                        }
                    }
                    if (put == null && del == null) {
                        continue;
                    }
                    // Always add the put first and then delete for a given row. This simplifies the logic in
                    // IndexRegionObserver
                    if (put != null) {
                        mutations.add(put);
                    }
                    if (del != null) {
                        mutations.add(del);
                    }
                    if (!verify) {
                        if (put != null) {
                            setMutationAttributes(put, uuidValue);
                        }
                        if (del != null) {
                            setMutationAttributes(del, uuidValue);
                        }
                        uuidValue = commitIfReady(uuidValue, mutations);
                    } else {
                        byte[] dataKey = (put != null) ? put.getRow() : del.getRow();
                        prepareIndexMutations(put, del);
                        dataKeyToMutationMap.put(dataKey, new Pair<Put, Delete>(put, del));
                    }
                    rowCount++;
                }
            } while (hasMore && rowCount < pageSizeInRows);
            if (!mutations.isEmpty()) {
                if (verify) {
                    verifyAndOrRebuildIndex();
                } else {
                    ungroupedAggregateRegionObserver.checkForRegionClosingOrSplitting();
                    ungroupedAggregateRegionObserver.commitBatchWithRetries(region, mutations, blockingMemstoreSize);
                }
            }
        }
    } catch (Throwable e) {
        LOGGER.error("Exception in IndexRebuildRegionScanner for region " + region.getRegionInfo().getRegionNameAsString(), e);
        throw e;
    } finally {
        region.closeRegionOperation();
        mutations.clear();
        if (verify) {
            dataKeyToMutationMap.clear();
            indexKeyToMutationMap.clear();
        }
        if (localScanner != null && localScanner != innerScanner) {
            localScanner.close();
        }
    }
    if (indexRowKey != null) {
        rowCount = singleRowRebuildReturnCode;
    }
    if (minTimestamp != 0) {
        nextStartKey = ByteUtil.calculateTheClosestNextRowKeyForPrefix(CellUtil.cloneRow(lastCell));
    }
    byte[] rowCountBytes = PLong.INSTANCE.toBytes(Long.valueOf(rowCount));
    final Cell aggKeyValue;
    if (lastCell == null) {
        aggKeyValue = PhoenixKeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, rowCountBytes, 0, rowCountBytes.length);
    } else {
        aggKeyValue = PhoenixKeyValueUtil.newKeyValue(CellUtil.cloneRow(lastCell), SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, rowCountBytes, 0, rowCountBytes.length);
    }
    results.add(aggKeyValue);
    return hasMore || hasMoreIncr;
}
#location 18
#vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code
public DataValue getDataForRule(Scenario scenario, Column phxMetaColumn) throws Exception {
    // TODO Make a Set of Rules that have already been applied so that so we don't generate for every value
    List<Scenario> scenarios = parser.getScenarios();
    DataValue value = null;
    if (scenarios.contains(scenario)) {
        logger.debug("We found a correct Scenario");
        // Assume the first rule map
        Map<DataTypeMapping, List> ruleMap = modelList.get(0);
        List<Column> ruleList = ruleMap.get(phxMetaColumn.getType());
        // Make sure Column from Phoenix Metadata matches a rule column
        if (ruleList.contains(phxMetaColumn)) {
            // Generate some random data based on this rule
            logger.debug("We found a correct column rule");
            Column columnRule = getColumnForRule(ruleList, phxMetaColumn);
            value = getDataValue(columnRule);
        } else {
            logger.warn("Attempted to apply rule to data, but could not find a rule to match type:" + phxMetaColumn.getType());
        }
    }
    return value;
}
#vulnerable code
public DataValue getDataForRule(Scenario scenario, Column phxMetaColumn) throws Exception {
    // TODO Make a Set of Rules that have already been applied so that so we don't generate for every value
    List<Scenario> scenarios = parser.getScenarios();
    DataValue value = null;
    if (scenarios.contains(scenario)) {
        logger.debug("We found a correct Scenario");
        // Assume the first rule map
        Map<DataTypeMapping, List> ruleMap = modelList.get(0);
        List<Column> ruleList = ruleMap.get(phxMetaColumn.getType());
        // Make sure Column from Phoenix Metadata matches a rule column
        if (ruleList.contains(phxMetaColumn)) {
            // Generate some random data based on this rule
            logger.debug("We found a correct column rule");
            Column columnRule = getColumnForRule(ruleList, phxMetaColumn);
            value = getDataValue(columnRule);
            synchronized (value) {
                // Add the prefix to the value if it exists.
                if (columnRule.getPrefix() != null) {
                    value.setValue(columnRule.getPrefix() + value.getValue());
                }
            }
        } else {
            logger.warn("Attempted to apply rule to data, but could not find a rule to match type:" + phxMetaColumn.getType());
        }
    }
    return value;
}
#location 16
#vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code, please generate the patch based on the following information.
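The fixed version above simply stops mutating the DataValue after it is produced, which removes the need for the synchronized block altogether. The underlying idea — build the final value in one step rather than patching a shared object afterwards — as a sketch with placeholder types:

class FreshValueSketch {
    // Combining prefix and raw value up front leaves nothing to mutate later,
    // so no lock is needed on the returned object.
    static String dataValue(String prefix, String raw) {
        return prefix == null ? raw : prefix + raw;
    }
}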
#fixed code
@Test
public void testLocalIndexTableRegionSplitPolicyAndSplitKeys() throws Exception {
    createBaseTable(DATA_TABLE_NAME, null, "('e','i','o')");
    Connection conn1 = DriverManager.getConnection(getUrl());
    Connection conn2 = DriverManager.getConnection(getUrl());
    conn1.createStatement().execute("CREATE LOCAL INDEX " + INDEX_TABLE_NAME + " ON " + DATA_TABLE_NAME + "(v1)");
    conn2.createStatement().executeQuery("SELECT * FROM " + DATA_TABLE_FULL_NAME).next();
    HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
    HTableDescriptor htd = admin.getTableDescriptor(
            TableName.valueOf(MetaDataUtil.getLocalIndexTableName(DATA_TABLE_NAME)));
    assertEquals(IndexRegionSplitPolicy.class.getName(), htd.getValue(HTableDescriptor.SPLIT_POLICY));
    try (HTable userTable = new HTable(admin.getConfiguration(), TableName.valueOf(DATA_TABLE_NAME))) {
        try (HTable indexTable = new HTable(admin.getConfiguration(),
                TableName.valueOf(MetaDataUtil.getLocalIndexTableName(DATA_TABLE_NAME)))) {
            assertArrayEquals("Both user table and index table should have same split keys.",
                    userTable.getStartKeys(), indexTable.getStartKeys());
        }
    }
}
#vulnerable code
@Test
public void testLocalIndexTableRegionSplitPolicyAndSplitKeys() throws Exception {
    createBaseTable(DATA_TABLE_NAME, null, "('e','i','o')");
    Connection conn1 = DriverManager.getConnection(getUrl());
    Connection conn2 = DriverManager.getConnection(getUrl());
    conn1.createStatement().execute("CREATE LOCAL INDEX " + INDEX_TABLE_NAME + " ON " + DATA_TABLE_NAME + "(v1)");
    conn2.createStatement().executeQuery("SELECT * FROM " + DATA_TABLE_FULL_NAME).next();
    HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
    HTableDescriptor htd = admin.getTableDescriptor(
            TableName.valueOf(MetaDataUtil.getLocalIndexTableName(DATA_TABLE_NAME)));
    assertEquals(IndexRegionSplitPolicy.class.getName(), htd.getValue(HTableDescriptor.SPLIT_POLICY));
    HTable userTable = new HTable(admin.getConfiguration(), TableName.valueOf(DATA_TABLE_NAME));
    HTable indexTable = new HTable(admin.getConfiguration(),
            TableName.valueOf(MetaDataUtil.getLocalIndexTableName(DATA_TABLE_NAME)));
    assertArrayEquals("Both user table and index table should have same split keys.",
            userTable.getStartKeys(), indexTable.getStartKeys());
}
#location 13
#vulnerability type RESOURCE_LEAK
Below is the vulnerable code, please generate the patch based on the following information.
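The fixed version above closes both HTable handles with try-with-resources. The same pattern in isolation, using a plain JDBC connection and a placeholder URL (both assumptions for illustration):

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;

class ResourceLeakSketch {
    static void query(String jdbcUrl) throws SQLException {
        // try-with-resources guarantees close() on every exit path,
        // including exceptions thrown mid-block
        try (Connection conn = DriverManager.getConnection(jdbcUrl)) {
            conn.createStatement().execute("SELECT 1");
        }
    }
}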
#fixed code
@Test
public void testUpsertSelectSameBatchConcurrently() throws Exception {
    try (Connection conn = driver.connect(url, props)) {
        int numUpsertSelectRunners = 5;
        ExecutorService exec = Executors.newFixedThreadPool(numUpsertSelectRunners);
        CompletionService<Boolean> completionService = new ExecutorCompletionService<Boolean>(exec);
        List<Future<Boolean>> futures = Lists.newArrayListWithExpectedSize(numUpsertSelectRunners);
        // run one UPSERT SELECT for 100 rows (that locks the rows for a long time)
        futures.add(completionService.submit(new UpsertSelectRunner(dataTable, 0, 105, 1)));
        // run four UPSERT SELECTS for 5 rows (that overlap with slow running UPSERT SELECT)
        for (int i = 0; i < 100; i += 25) {
            futures.add(completionService.submit(new UpsertSelectRunner(dataTable, i, i + 25, 5)));
        }
        int received = 0;
        while (received < futures.size()) {
            Future<Boolean> resultFuture = completionService.take();
            Boolean result = resultFuture.get();
            received++;
            assertTrue(result);
        }
        exec.shutdownNow();
    }
}
#vulnerable code
@Test
public void testUpsertSelectSameBatchConcurrently() throws Exception {
    final String dataTable = generateUniqueName();
    final String index = "IDX_" + dataTable;
    // create the table and ensure its empty
    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    Connection conn = driver.connect(url, props);
    conn.createStatement()
            .execute("CREATE TABLE " + dataTable + " (k INTEGER NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR)");
    // create the index and ensure its empty as well
    conn.createStatement().execute("CREATE INDEX " + index + " ON " + dataTable + " (v1)");

    conn = DriverManager.getConnection(getUrl(), props);
    PreparedStatement stmt = conn.prepareStatement("UPSERT INTO " + dataTable + " VALUES(?,?,?)");
    conn.setAutoCommit(false);
    for (int i = 0; i < 100; i++) {
        stmt.setInt(1, i);
        stmt.setString(2, "v1" + i);
        stmt.setString(3, "v2" + i);
        stmt.execute();
    }
    conn.commit();

    int numUpsertSelectRunners = 5;
    ExecutorService exec = Executors.newFixedThreadPool(numUpsertSelectRunners);
    CompletionService<Boolean> completionService = new ExecutorCompletionService<Boolean>(exec);
    List<Future<Boolean>> futures = Lists.newArrayListWithExpectedSize(numUpsertSelectRunners);
    // run one UPSERT SELECT for 100 rows (that locks the rows for a long time)
    futures.add(completionService.submit(new UpsertSelectRunner(dataTable, 0, 105, 1)));
    // run four UPSERT SELECTS for 5 rows (that overlap with slow running UPSERT SELECT)
    for (int i = 0; i < 100; i += 25) {
        futures.add(completionService.submit(new UpsertSelectRunner(dataTable, i, i + 25, 5)));
    }
    int received = 0;
    while (received < futures.size()) {
        Future<Boolean> resultFuture = completionService.take();
        Boolean result = resultFuture.get();
        received++;
        assertTrue(result);
    }
    exec.shutdownNow();
    conn.close();
}
#location 8
#vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
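For context on the NULL_DEREFERENCE above: line 8 of the vulnerable method dereferences the result of driver.connect(url, props), and java.sql.Driver.connect is specified to return null when the driver does not accept the URL. A minimal sketch of the missing guard (hypothetical helper name):

import java.sql.Connection;
import java.sql.Driver;
import java.sql.SQLException;
import java.util.Properties;

class ConnectSketch {
    // Driver.connect may legitimately return null, so check before use.
    static Connection connectOrFail(Driver driver, String url) throws SQLException {
        Connection conn = driver.connect(url, new Properties());
        if (conn == null) {
            throw new SQLException("Driver did not accept URL: " + url);
        }
        return conn;
    }
}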
#fixed code
public byte[] buildRowKey(ValueGetter valueGetter, ImmutableBytesWritable rowKeyPtr,
        byte[] regionStartKey, byte[] regionEndKey, long ts) {
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    boolean prependRegionStartKey = isLocalIndex && regionStartKey != null;
    boolean isIndexSalted = !isLocalIndex && nIndexSaltBuckets > 0;
    int prefixKeyLength = prependRegionStartKey
            ? (regionStartKey.length != 0 ? regionStartKey.length : regionEndKey.length) : 0;
    TrustedByteArrayOutputStream stream = new TrustedByteArrayOutputStream(
            estimatedIndexRowKeyBytes + (prependRegionStartKey ? prefixKeyLength : 0));
    DataOutput output = new DataOutputStream(stream);
    try {
        // For local indexes, we must prepend the row key with the start region key
        if (prependRegionStartKey) {
            if (regionStartKey.length == 0) {
                output.write(new byte[prefixKeyLength]);
            } else {
                output.write(regionStartKey);
            }
        }
        if (isIndexSalted) {
            output.write(0); // will be set at end to index salt byte
        }
        // The dataRowKeySchema includes the salt byte field,
        // so we must adjust for that here.
        int dataPosOffset = isDataTableSalted ? 1 : 0;
        BitSet viewConstantColumnBitSet = this.rowKeyMetaData.getViewConstantColumnBitSet();
        int nIndexedColumns = getIndexPkColumnCount() - getNumViewConstants();
        int[][] dataRowKeyLocator = new int[2][nIndexedColumns];
        // Skip data table salt byte
        int maxRowKeyOffset = rowKeyPtr.getOffset() + rowKeyPtr.getLength();
        dataRowKeySchema.iterator(rowKeyPtr, ptr, dataPosOffset);
        if (viewIndexId != null) {
            output.write(viewIndexId);
        }
        if (isMultiTenant) {
            dataRowKeySchema.next(ptr, dataPosOffset, maxRowKeyOffset);
            output.write(ptr.get(), ptr.getOffset(), ptr.getLength());
            if (!dataRowKeySchema.getField(dataPosOffset).getDataType().isFixedWidth()) {
                output.writeByte(SchemaUtil.getSeparatorByte(rowKeyOrderOptimizable,
                        ptr.getLength() == 0, dataRowKeySchema.getField(dataPosOffset)));
            }
            dataPosOffset++;
        }
        // Write index row key
        for (int i = dataPosOffset; i < indexDataColumnCount; i++) {
            Boolean hasValue = dataRowKeySchema.next(ptr, i, maxRowKeyOffset);
            // Ignore view constants from the data table, as these
            // don't need to appear in the index (as they're the
            // same for all rows in this index)
            if (!viewConstantColumnBitSet.get(i) || isIndexOnBaseTable()) {
                int pos = rowKeyMetaData.getIndexPkPosition(i - dataPosOffset);
                if (Boolean.TRUE.equals(hasValue)) {
                    dataRowKeyLocator[0][pos] = ptr.getOffset();
                    dataRowKeyLocator[1][pos] = ptr.getLength();
                } else {
                    dataRowKeyLocator[0][pos] = 0;
                    dataRowKeyLocator[1][pos] = 0;
                }
            }
        }
        BitSet descIndexColumnBitSet = rowKeyMetaData.getDescIndexColumnBitSet();
        Iterator<Expression> expressionIterator = indexedExpressions.iterator();
        for (int i = 0; i < nIndexedColumns; i++) {
            PDataType dataColumnType;
            boolean isNullable;
            SortOrder dataSortOrder;
            if (dataPkPosition[i] == EXPRESSION_NOT_PRESENT) {
                Expression expression = expressionIterator.next();
                dataColumnType = expression.getDataType();
                dataSortOrder = expression.getSortOrder();
                isNullable = expression.isNullable();
                expression.evaluate(new ValueGetterTuple(valueGetter, ts), ptr);
            } else {
                Field field = dataRowKeySchema.getField(dataPkPosition[i]);
                dataColumnType = field.getDataType();
                ptr.set(rowKeyPtr.get(), dataRowKeyLocator[0][i], dataRowKeyLocator[1][i]);
                dataSortOrder = field.getSortOrder();
                isNullable = field.isNullable();
            }
            boolean isDataColumnInverted = dataSortOrder != SortOrder.ASC;
            PDataType indexColumnType = IndexUtil.getIndexColumnDataType(isNullable, dataColumnType);
            boolean isBytesComparable = dataColumnType.isBytesComparableWith(indexColumnType);
            boolean isIndexColumnDesc = descIndexColumnBitSet.get(i);
            if (isBytesComparable && isDataColumnInverted == isIndexColumnDesc) {
                output.write(ptr.get(), ptr.getOffset(), ptr.getLength());
            } else {
                if (!isBytesComparable) {
                    indexColumnType.coerceBytes(ptr, dataColumnType, dataSortOrder, SortOrder.getDefault());
                }
                if (isDataColumnInverted != isIndexColumnDesc) {
                    writeInverted(ptr.get(), ptr.getOffset(), ptr.getLength(), output);
                } else {
                    output.write(ptr.get(), ptr.getOffset(), ptr.getLength());
                }
            }
            if (!indexColumnType.isFixedWidth()) {
                output.writeByte(SchemaUtil.getSeparatorByte(rowKeyOrderOptimizable,
                        ptr.getLength() == 0, isIndexColumnDesc ? SortOrder.DESC : SortOrder.ASC));
            }
        }
        int length = stream.size();
        int minLength = length - maxTrailingNulls;
        byte[] indexRowKey = stream.getBuffer();
        // Remove trailing nulls
        while (length > minLength && indexRowKey[length - 1] == QueryConstants.SEPARATOR_BYTE) {
            length--;
        }
        if (isIndexSalted) {
            // Set salt byte
            byte saltByte = SaltingUtil.getSaltingByte(indexRowKey, SaltingUtil.NUM_SALTING_BYTES,
                    length - SaltingUtil.NUM_SALTING_BYTES, nIndexSaltBuckets);
            indexRowKey[0] = saltByte;
        }
        return indexRowKey.length == length ? indexRowKey : Arrays.copyOf(indexRowKey, length);
    } catch (IOException e) {
        throw new RuntimeException(e); // Impossible
    } finally {
        try {
            stream.close();
        } catch (IOException e) {
            throw new RuntimeException(e); // Impossible
        }
    }
}
#vulnerable code
public byte[] buildRowKey(ValueGetter valueGetter, ImmutableBytesWritable rowKeyPtr,
        byte[] regionStartKey, byte[] regionEndKey, long ts) {
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    boolean prependRegionStartKey = isLocalIndex && regionStartKey != null;
    boolean isIndexSalted = !isLocalIndex && nIndexSaltBuckets > 0;
    int prefixKeyLength = prependRegionStartKey
            ? (regionStartKey.length != 0 ? regionStartKey.length : regionEndKey.length) : 0;
    TrustedByteArrayOutputStream stream = new TrustedByteArrayOutputStream(
            estimatedIndexRowKeyBytes + (prependRegionStartKey ? prefixKeyLength : 0));
    DataOutput output = new DataOutputStream(stream);
    try {
        // For local indexes, we must prepend the row key with the start region key
        if (prependRegionStartKey) {
            if (regionStartKey.length == 0) {
                output.write(new byte[prefixKeyLength]);
            } else {
                output.write(regionStartKey);
            }
        }
        if (isIndexSalted) {
            output.write(0); // will be set at end to index salt byte
        }
        // The dataRowKeySchema includes the salt byte field,
        // so we must adjust for that here.
        int dataPosOffset = isDataTableSalted ? 1 : 0;
        BitSet viewConstantColumnBitSet = this.rowKeyMetaData.getViewConstantColumnBitSet();
        int nIndexedColumns = getIndexPkColumnCount() - getNumViewConstants();
        int[][] dataRowKeyLocator = new int[2][nIndexedColumns];
        // Skip data table salt byte
        int maxRowKeyOffset = rowKeyPtr.getOffset() + rowKeyPtr.getLength();
        dataRowKeySchema.iterator(rowKeyPtr, ptr, dataPosOffset);
        if (viewIndexId != null) {
            output.write(viewIndexId);
        }
        if (isMultiTenant) {
            dataRowKeySchema.next(ptr, dataPosOffset, maxRowKeyOffset);
            output.write(ptr.get(), ptr.getOffset(), ptr.getLength());
            if (!dataRowKeySchema.getField(dataPosOffset).getDataType().isFixedWidth()) {
                output.writeByte(SchemaUtil.getSeparatorByte(rowKeyOrderOptimizable,
                        ptr.getLength() == 0, dataRowKeySchema.getField(dataPosOffset)));
            }
            dataPosOffset++;
        }
        // Write index row key
        for (int i = dataPosOffset; i < indexDataColumnCount; i++) {
            Boolean hasValue = dataRowKeySchema.next(ptr, i, maxRowKeyOffset);
            // Ignore view constants from the data table, as these
            // don't need to appear in the index (as they're the
            // same for all rows in this index)
            if (!viewConstantColumnBitSet.get(i)) {
                int pos = rowKeyMetaData.getIndexPkPosition(i - dataPosOffset);
                if (Boolean.TRUE.equals(hasValue)) {
                    dataRowKeyLocator[0][pos] = ptr.getOffset();
                    dataRowKeyLocator[1][pos] = ptr.getLength();
                } else {
                    dataRowKeyLocator[0][pos] = 0;
                    dataRowKeyLocator[1][pos] = 0;
                }
            }
        }
        BitSet descIndexColumnBitSet = rowKeyMetaData.getDescIndexColumnBitSet();
        Iterator<Expression> expressionIterator = indexedExpressions.iterator();
        for (int i = 0; i < nIndexedColumns; i++) {
            PDataType dataColumnType;
            boolean isNullable;
            SortOrder dataSortOrder;
            if (dataPkPosition[i] == EXPRESSION_NOT_PRESENT) {
                Expression expression = expressionIterator.next();
                dataColumnType = expression.getDataType();
                dataSortOrder = expression.getSortOrder();
                isNullable = expression.isNullable();
                expression.evaluate(new ValueGetterTuple(valueGetter, ts), ptr);
            } else {
                Field field = dataRowKeySchema.getField(dataPkPosition[i]);
                dataColumnType = field.getDataType();
                ptr.set(rowKeyPtr.get(), dataRowKeyLocator[0][i], dataRowKeyLocator[1][i]);
                dataSortOrder = field.getSortOrder();
                isNullable = field.isNullable();
            }
            boolean isDataColumnInverted = dataSortOrder != SortOrder.ASC;
            PDataType indexColumnType = IndexUtil.getIndexColumnDataType(isNullable, dataColumnType);
            boolean isBytesComparable = dataColumnType.isBytesComparableWith(indexColumnType);
            boolean isIndexColumnDesc = descIndexColumnBitSet.get(i);
            if (isBytesComparable && isDataColumnInverted == isIndexColumnDesc) {
                output.write(ptr.get(), ptr.getOffset(), ptr.getLength());
            } else {
                if (!isBytesComparable) {
                    indexColumnType.coerceBytes(ptr, dataColumnType, dataSortOrder, SortOrder.getDefault());
                }
                if (isDataColumnInverted != isIndexColumnDesc) {
                    writeInverted(ptr.get(), ptr.getOffset(), ptr.getLength(), output);
                } else {
                    output.write(ptr.get(), ptr.getOffset(), ptr.getLength());
                }
            }
            if (!indexColumnType.isFixedWidth()) {
                output.writeByte(SchemaUtil.getSeparatorByte(rowKeyOrderOptimizable,
                        ptr.getLength() == 0, isIndexColumnDesc ? SortOrder.DESC : SortOrder.ASC));
            }
        }
        int length = stream.size();
        int minLength = length - maxTrailingNulls;
        byte[] indexRowKey = stream.getBuffer();
        // Remove trailing nulls
        while (length > minLength && indexRowKey[length - 1] == QueryConstants.SEPARATOR_BYTE) {
            length--;
        }
        if (isIndexSalted) {
            // Set salt byte
            byte saltByte = SaltingUtil.getSaltingByte(indexRowKey, SaltingUtil.NUM_SALTING_BYTES,
                    length - SaltingUtil.NUM_SALTING_BYTES, nIndexSaltBuckets);
            indexRowKey[0] = saltByte;
        }
        return indexRowKey.length == length ? indexRowKey : Arrays.copyOf(indexRowKey, length);
    } catch (IOException e) {
        throw new RuntimeException(e); // Impossible
    } finally {
        try {
            stream.close();
        } catch (IOException e) {
            throw new RuntimeException(e); // Impossible
        }
    }
}
#location 106
#vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
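The NULL_DEREFERENCE label on this pair is less obvious from the diff (which only adds the isIndexOnBaseTable() escape hatch); one plausible reading is the unguarded regionEndKey.length in the prefix-length computation. A defensive version of that computation might look like the hypothetical helper below — an illustration, not the project's actual fix:

class KeyPrefixSketch {
    // Hypothetical helper: compute a prefix length without assuming
    // the caller supplied both region keys.
    static int prefixLength(byte[] startKey, byte[] endKey) {
        if (startKey != null && startKey.length != 0) {
            return startKey.length;
        }
        return endKey == null ? 0 : endKey.length;
    }
}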
#fixed code
@Test
public void testIndexHalfStoreFileReader() throws Exception {
    Connection conn1 = getConnection();
    ConnectionQueryServices connectionQueryServices =
            driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES);
    HBaseAdmin admin = connectionQueryServices.getAdmin();
    String tableName = "TBL_" + generateUniqueName();
    String indexName = "IDX_" + generateUniqueName();
    createBaseTable(conn1, tableName, "('e')");
    conn1.createStatement().execute("CREATE " + (localIndex ? "LOCAL" : "") + " INDEX " + indexName
            + " ON " + tableName + "(v1)" + (localIndex ? "" : " SPLIT ON ('e')"));
    conn1.createStatement().execute("UPSERT INTO " + tableName + " values('b',1,2,4,'z')");
    conn1.createStatement().execute("UPSERT INTO " + tableName + " values('f',1,2,3,'z')");
    conn1.createStatement().execute("UPSERT INTO " + tableName + " values('j',2,4,2,'a')");
    conn1.createStatement().execute("UPSERT INTO " + tableName + " values('q',3,1,1,'c')");
    conn1.commit();
    String query = "SELECT count(*) FROM " + tableName + " where v1<='z'";
    ResultSet rs = conn1.createStatement().executeQuery(query);
    assertTrue(rs.next());
    assertEquals(4, rs.getInt(1));
    TableName indexTable = TableName.valueOf(localIndex ? tableName : indexName);
    admin.flush(indexTable);
    boolean merged = false;
    HTableInterface table = connectionQueryServices.getTable(indexTable.getName());
    // merge regions until 1 left
    long numRegions = 0;
    while (true) {
        rs = conn1.createStatement().executeQuery(query);
        assertTrue(rs.next());
        assertEquals(4, rs.getInt(1)); // TODO this returns 5 sometimes instead of 4, duplicate results?
        try {
            List<HRegionInfo> indexRegions = admin.getTableRegions(indexTable);
            numRegions = indexRegions.size();
            if (numRegions == 1) {
                break;
            }
            if (!merged) {
                List<HRegionInfo> regions = admin.getTableRegions(indexTable);
                Log.info("Merging: " + regions.size());
                admin.mergeRegions(regions.get(0).getEncodedNameAsBytes(),
                        regions.get(1).getEncodedNameAsBytes(), false);
                merged = true;
                Threads.sleep(10000);
            }
        } catch (Exception ex) {
            Log.info(ex);
        }
        long waitStartTime = System.currentTimeMillis();
        // wait until merge happened
        while (System.currentTimeMillis() - waitStartTime < 10000) {
            List<HRegionInfo> regions = admin.getTableRegions(indexTable);
            Log.info("Waiting:" + regions.size());
            if (regions.size() < numRegions) {
                break;
            }
            Threads.sleep(1000);
        }
        SnapshotTestingUtils.waitForTableToBeOnline(BaseTest.getUtility(), indexTable);
        assertTrue("Index table should be online ", admin.isTableAvailable(indexTable));
    }
}
#vulnerable code
@Test
public void testIndexHalfStoreFileReader() throws Exception {
    Connection conn1 = getConnection();
    ConnectionQueryServices connectionQueryServices =
            driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES);
    HBaseAdmin admin = connectionQueryServices.getAdmin();
    String tableName = "TBL_" + generateUniqueName();
    String indexName = "IDX_" + generateUniqueName();
    try {
        dropTable(admin, conn1);
        createBaseTable(conn1, tableName, "('e')");
        conn1.createStatement().execute("CREATE " + (localIndex ? "LOCAL" : "") + " INDEX " + indexName
                + " ON " + tableName + "(v1)" + (localIndex ? "" : " SPLIT ON ('e')"));
        conn1.createStatement().execute("UPSERT INTO " + tableName + " values('b',1,2,4,'z')");
        conn1.createStatement().execute("UPSERT INTO " + tableName + " values('f',1,2,3,'z')");
        conn1.createStatement().execute("UPSERT INTO " + tableName + " values('j',2,4,2,'a')");
        conn1.createStatement().execute("UPSERT INTO " + tableName + " values('q',3,1,1,'c')");
        conn1.commit();
        String query = "SELECT count(*) FROM " + tableName + " where v1<='z'";
        ResultSet rs = conn1.createStatement().executeQuery(query);
        assertTrue(rs.next());
        assertEquals(4, rs.getInt(1));
        TableName indexTable = TableName.valueOf(localIndex ? tableName : indexName);
        admin.flush(indexTable);
        boolean merged = false;
        HTableInterface table = connectionQueryServices.getTable(indexTable.getName());
        // merge regions until 1 left
        long numRegions = 0;
        while (true) {
            rs = conn1.createStatement().executeQuery(query);
            assertTrue(rs.next());
            assertEquals(4, rs.getInt(1)); // TODO this returns 5 sometimes instead of 4, duplicate results?
            try {
                List<HRegionInfo> indexRegions = admin.getTableRegions(indexTable);
                numRegions = indexRegions.size();
                if (numRegions == 1) {
                    break;
                }
                if (!merged) {
                    List<HRegionInfo> regions = admin.getTableRegions(indexTable);
                    Log.info("Merging: " + regions.size());
                    admin.mergeRegions(regions.get(0).getEncodedNameAsBytes(),
                            regions.get(1).getEncodedNameAsBytes(), false);
                    merged = true;
                    Threads.sleep(10000);
                }
            } catch (Exception ex) {
                Log.info(ex);
            }
            long waitStartTime = System.currentTimeMillis();
            // wait until merge happened
            while (System.currentTimeMillis() - waitStartTime < 10000) {
                List<HRegionInfo> regions = admin.getTableRegions(indexTable);
                Log.info("Waiting:" + regions.size());
                if (regions.size() < numRegions) {
                    break;
                }
                Threads.sleep(1000);
            }
            SnapshotTestingUtils.waitForTableToBeOnline(BaseTest.getUtility(), indexTable);
            assertTrue("Index table should be online ", admin.isTableAvailable(indexTable));
        }
    } finally {
        dropTable(admin, conn1);
    }
}
#location 9
#vulnerability type RESOURCE_LEAK
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code
public boolean seekOrReseek(Cell cell, boolean isSeek) throws IOException {
    Cell keyToSeek = cell;
    KeyValue splitKeyValue = new KeyValue.KeyOnlyKeyValue(reader.getSplitkey());
    if (reader.isTop()) {
        if (this.comparator.compare(cell, splitKeyValue, true) < 0) {
            if (!isSeek && realSeekDone()) {
                return true;
            }
            return seekOrReseekToProperKey(isSeek, keyToSeek);
        }
        keyToSeek = getKeyPresentInHFiles(cell);
        return seekOrReseekToProperKey(isSeek, keyToSeek);
    } else {
        if (this.comparator.compare(cell, splitKeyValue, true) >= 0) {
            close();
            return false;
        }
        if (!isSeek && reader.getRegionInfo().getStartKey().length == 0
                && reader.getSplitRow().length > reader.getRegionStartKeyInHFile().length) {
            keyToSeek = getKeyPresentInHFiles(cell);
        }
    }
    return seekOrReseekToProperKey(isSeek, keyToSeek);
}
#vulnerable code
public boolean seekOrReseek(Cell cell, boolean isSeek) throws IOException {
    KeyValue kv = PhoenixKeyValueUtil.maybeCopyCell(cell);
    KeyValue keyToSeek = kv;
    KeyValue splitKeyValue = KeyValueUtil.createKeyValueFromKey(reader.getSplitkey());
    if (reader.isTop()) {
        if (getComparator().compare(kv, splitKeyValue) < 0) {
            if (!isSeek && realSeekDone()) {
                return true;
            }
            return seekOrReseekToProperKey(isSeek, keyToSeek);
        }
        keyToSeek = getKeyPresentInHFiles(kv.getRowArray());
        return seekOrReseekToProperKey(isSeek, keyToSeek);
    } else {
        if (getComparator().compare(kv, splitKeyValue) >= 0) {
            close();
            return false;
        }
        if (!isSeek && reader.getRegionInfo().getStartKey().length == 0
                && reader.getSplitRow().length > reader.getRegionStartKeyInHFile().length) {
            keyToSeek = getKeyPresentInHFiles(kv.getRowArray());
        }
    }
    return seekOrReseekToProperKey(isSeek, keyToSeek);
}
#location 12
#vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
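For context on the NULL_DEREFERENCE above: the vulnerable version dereferences the result of PhoenixKeyValueUtil.maybeCopyCell unconditionally, which is what the finding points at, while the fix sidesteps the copy and works on the incoming Cell directly. The hazard in miniature, with a pure-Java stand-in for the "maybe" helper:

class NullGuardSketch {
    // Mirrors the hazard: a "maybe copy" helper that can return null.
    static String maybeCopy(String s) {
        return s == null ? null : new String(s);
    }

    static int length(String s) {
        String copy = maybeCopy(s);
        return copy == null ? 0 : copy.length();  // guard before dereference
    }
}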
#fixed code
@Test(timeout=300000)
public void testWriteFailureDisablesIndex() throws Exception {
    testWriteFailureDisablesIndex(false);
}
#vulnerable code
@Test(timeout=300000)
public void testWriteFailureDisablesIndex() throws Exception {
    String query;
    ResultSet rs;
    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    Connection conn = driver.connect(url, props);
    conn.setAutoCommit(false);
    conn.createStatement().execute(
            "CREATE TABLE " + DATA_TABLE_FULL_NAME + " (k VARCHAR NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR)");
    query = "SELECT * FROM " + DATA_TABLE_FULL_NAME;
    rs = conn.createStatement().executeQuery(query);
    assertFalse(rs.next());

    conn.createStatement().execute(
            "CREATE INDEX " + INDEX_TABLE_NAME + " ON " + DATA_TABLE_FULL_NAME + " (v1) INCLUDE (v2)");
    query = "SELECT * FROM " + INDEX_TABLE_FULL_NAME;
    rs = conn.createStatement().executeQuery(query);
    assertFalse(rs.next());

    // Verify the metadata for index is correct.
    rs = conn.getMetaData().getTables(null, StringUtil.escapeLike(SCHEMA_NAME), INDEX_TABLE_NAME,
            new String[] { PTableType.INDEX.toString() });
    assertTrue(rs.next());
    assertEquals(INDEX_TABLE_NAME, rs.getString(3));
    assertEquals(PIndexState.ACTIVE.toString(), rs.getString("INDEX_STATE"));
    assertFalse(rs.next());

    PreparedStatement stmt = conn.prepareStatement("UPSERT INTO " + DATA_TABLE_FULL_NAME + " VALUES(?,?,?)");
    stmt.setString(1, "a");
    stmt.setString(2, "x");
    stmt.setString(3, "1");
    stmt.execute();
    conn.commit();

    TableName indexTable = TableName.valueOf(INDEX_TABLE_FULL_NAME);
    HBaseAdmin admin = this.util.getHBaseAdmin();
    HTableDescriptor indexTableDesc = admin.getTableDescriptor(indexTable);
    try {
        admin.disableTable(indexTable);
        admin.deleteTable(indexTable);
    } catch (TableNotFoundException ignore) {}

    stmt = conn.prepareStatement("UPSERT INTO " + DATA_TABLE_FULL_NAME + " VALUES(?,?,?)");
    stmt.setString(1, "a2");
    stmt.setString(2, "x2");
    stmt.setString(3, "2");
    stmt.execute();
    try {
        conn.commit();
    } catch (SQLException e) {}

    // Verify the metadata for index is correct.
    rs = conn.getMetaData().getTables(null, StringUtil.escapeLike(SCHEMA_NAME), INDEX_TABLE_NAME,
            new String[] { PTableType.INDEX.toString() });
    assertTrue(rs.next());
    assertEquals(INDEX_TABLE_NAME, rs.getString(3));
    assertEquals(PIndexState.DISABLE.toString(), rs.getString("INDEX_STATE"));
    assertFalse(rs.next());

    // Verify UPSERT on data table still work after index is disabled
    stmt = conn.prepareStatement("UPSERT INTO " + DATA_TABLE_FULL_NAME + " VALUES(?,?,?)");
    stmt.setString(1, "a3");
    stmt.setString(2, "x3");
    stmt.setString(3, "3");
    stmt.execute();
    conn.commit();

    query = "SELECT v2 FROM " + DATA_TABLE_FULL_NAME + " where v1='x3'";
    rs = conn.createStatement().executeQuery("EXPLAIN " + query);
    assertTrue(QueryUtil.getExplainPlan(rs).contains("CLIENT PARALLEL 1-WAY FULL SCAN OVER " + DATA_TABLE_FULL_NAME));
    rs = conn.createStatement().executeQuery(query);
    assertTrue(rs.next());

    // recreate index table
    admin.createTable(indexTableDesc);
    do {
        Thread.sleep(15 * 1000); // sleep 15 secs
        rs = conn.getMetaData().getTables(null, StringUtil.escapeLike(SCHEMA_NAME), INDEX_TABLE_NAME,
                new String[] { PTableType.INDEX.toString() });
        assertTrue(rs.next());
        if (PIndexState.ACTIVE.toString().equals(rs.getString("INDEX_STATE"))) {
            break;
        }
    } while (true);

    // verify index table has data
    query = "SELECT count(1) FROM " + INDEX_TABLE_FULL_NAME;
    rs = conn.createStatement().executeQuery(query);
    assertTrue(rs.next());

    // using 2 here because we only partially build index from where we failed and the oldest
    // index row has been deleted when we dropped the index table during test.
    assertEquals(2, rs.getInt(1));
}
#location 8
#vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code
@Test
public void testCSVCommonsUpsert_WithTimestamp() throws Exception {
    CSVParser parser = null;
    PhoenixConnection conn = null;
    try {
        // Create table
        String statements = "CREATE TABLE IF NOT EXISTS TS_TABLE "
                + "(ID BIGINT NOT NULL PRIMARY KEY, TS TIMESTAMP);";
        conn = DriverManager.getConnection(getUrl()).unwrap(PhoenixConnection.class);
        PhoenixRuntime.executeStatements(conn, new StringReader(statements), null);

        // Upsert CSV file
        CSVCommonsLoader csvUtil = new CSVCommonsLoader(conn, "TS_TABLE",
                ImmutableList.<String>of(), true, ',', '"', null, "!");
        csvUtil.upsert(
                new StringReader("ID,TS\n"
                        + "1,1970-01-01 00:00:10\n"
                        + "2,1970-01-01 00:00:10.123\n"));

        // Compare Phoenix ResultSet with CSV file content
        PreparedStatement statement = conn
                .prepareStatement("SELECT ID, TS FROM TS_TABLE ORDER BY ID");
        ResultSet phoenixResultSet = statement.executeQuery();
        assertTrue(phoenixResultSet.next());
        assertEquals(1L, phoenixResultSet.getLong(1));
        assertEquals(10000L, phoenixResultSet.getTimestamp(2).getTime());
        assertTrue(phoenixResultSet.next());
        assertEquals(2L, phoenixResultSet.getLong(1));
        assertEquals(10123L, phoenixResultSet.getTimestamp(2).getTime());
        assertFalse(phoenixResultSet.next());
    } finally {
        if (parser != null) parser.close();
        if (conn != null) conn.close();
    }
}
#vulnerable code
@Test
public void testCSVCommonsUpsert_WithTimestamp() throws Exception {
    CSVParser parser = null;
    PhoenixConnection conn = null;
    try {
        // Create table
        String statements = "CREATE TABLE IF NOT EXISTS TS_TABLE "
                + "(ID BIGINT NOT NULL PRIMARY KEY, TS TIMESTAMP);";
        conn = DriverManager.getConnection(getUrl()).unwrap(PhoenixConnection.class);
        PhoenixRuntime.executeStatements(conn, new StringReader(statements), null);

        // Upsert CSV file
        CSVCommonsLoader csvUtil = new CSVCommonsLoader(conn, "TS_TABLE",
                null, true, ',', '"', null, "!");
        csvUtil.upsert(
                new StringReader("ID,TS\n"
                        + "1,1970-01-01 00:00:10\n"
                        + "2,1970-01-01 00:00:10.123\n"));

        // Compare Phoenix ResultSet with CSV file content
        PreparedStatement statement = conn
                .prepareStatement("SELECT ID, TS FROM TS_TABLE ORDER BY ID");
        ResultSet phoenixResultSet = statement.executeQuery();
        assertTrue(phoenixResultSet.next());
        assertEquals(1L, phoenixResultSet.getLong(1));
        assertEquals(10000L, phoenixResultSet.getTimestamp(2).getTime());
        assertTrue(phoenixResultSet.next());
        assertEquals(2L, phoenixResultSet.getLong(1));
        assertEquals(10123L, phoenixResultSet.getTimestamp(2).getTime());
        assertFalse(phoenixResultSet.next());
    } finally {
        if (parser != null) parser.close();
        if (conn != null) conn.close();
    }
}
#location 18
#vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
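The only change in the fixed version above is passing ImmutableList.<String>of() instead of null for the column list; preferring empty collections over null spares every callee a null check. In miniature:

import java.util.Collections;
import java.util.List;

class EmptyOverNullSketch {
    // Safe only if callers never pass null — an empty list makes that trivially true.
    static int count(List<String> columns) {
        return columns.size();
    }

    public static void main(String[] args) {
        System.out.println(count(Collections.<String>emptyList()));  // prints 0, no NPE
    }
}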
#fixed code
public static long convertToMilliseconds(long serverTimeStamp) {
    return serverTimeStamp / TransactionFactory.getTransactionProvider().getTransactionContext().getMaxTransactionsPerSecond();
}
#vulnerable code
public static long convertToMilliseconds(long serverTimeStamp) {
    return serverTimeStamp / TransactionFactory.getTransactionFactory().getTransactionContext().getMaxTransactionsPerSecond();
}
#location 2
#vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
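Here the fix swaps getTransactionFactory() for getTransactionProvider(); presumably the former can return null before the transaction machinery is initialized (an assumption — the pair itself does not say why). A lazily initialized accessor guarded explicitly, with hypothetical types:

class LazyProviderSketch {
    interface Provider { long maxPerSecond(); }

    private static volatile Provider provider;  // may still be null before init

    static long toMillis(long ts) {
        Provider p = provider;                  // read the volatile field once
        if (p == null) {
            throw new IllegalStateException("provider not initialized");
        }
        return ts / p.maxPerSecond();
    }
}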
#fixed code
@Test
public void testSelectUpsertWithOldClient() throws Exception {
    // Insert data with new client and read with old client
    executeQueriesWithCurrentVersion(CREATE_ADD);
    executeQueryWithClientVersion(compatibleClientVersion, QUERY);
    assertExpectedOutput(CREATE_ADD, QUERY);

    // Insert more data with old client and read with new client
    executeQueryWithClientVersion(compatibleClientVersion, ADD_DATA);
    executeQueriesWithCurrentVersion(QUERY_MORE);
    assertExpectedOutput(ADD_DATA, QUERY_MORE);
}
#vulnerable code
@Test
public void testSelectUpsertWithOldClient() throws Exception {
    checkForPreConditions();
    // Insert data with new client and read with old client
    executeQueriesWithCurrentVersion(CREATE_ADD);
    executeQueryWithClientVersion(compatibleClientVersion, QUERY);
    assertTrue(compareOutput(CREATE_ADD, QUERY));

    // Insert more data with old client and read with new client
    executeQueryWithClientVersion(compatibleClientVersion, ADD_DATA);
    executeQueriesWithCurrentVersion(QUERY_MORE);
    assertTrue(compareOutput(ADD_DATA, QUERY_MORE));
}
#location 3
#vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code, please generate the patch based on the following information.
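For context on this last THREAD_SAFETY_VIOLATION: the fixed test drops the checkForPreConditions() call, which plausibly touched shared static state from concurrently running tests (an assumption, since the helper's body is not shown). A thread-safe run-once precondition check, as a sketch:

import java.util.concurrent.atomic.AtomicBoolean;

class PreconditionSketch {
    private static final AtomicBoolean checked = new AtomicBoolean(false);

    static void checkOnce() {
        // compareAndSet lets exactly one thread win the race to run the check
        if (checked.compareAndSet(false, true)) {
            // expensive one-time validation goes here
        }
    }
}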