repo_id
stringclasses 205
values | file_path
stringlengths 33
141
| content
stringlengths 1
307k
| __index_level_0__
int64 0
0
|
---|---|---|---|
/home/johnshepherd/drake/systems/framework | /home/johnshepherd/drake/systems/framework/test/supervector_test.cc | #include "drake/systems/framework/supervector.h"
#include <memory>
#include <Eigen/Dense>
#include <gtest/gtest.h>
#include "drake/systems/framework/basic_vector.h"
#include "drake/systems/framework/vector_base.h"
namespace drake {
namespace systems {
namespace {
const int kLength = 9;
// Fixture that assembles a Supervector from four constituent BasicVectors
// (sizes 4, 2, 0, and 3) holding the values 0..8 in order, so that
// supervector element i holds the value i.
class SupervectorTest : public ::testing::Test {
 protected:
  void SetUp() override {
    vec1_ = BasicVector<double>::Make({0, 1, 2, 3});
    vec2_ = BasicVector<double>::Make({4, 5});
    vec3_ = BasicVector<double>::Make({});
    vec4_ = BasicVector<double>::Make({6, 7, 8});
    const std::vector<VectorBase<double>*> parts{
        vec1_.get(), vec2_.get(), vec3_.get(), vec4_.get()};
    supervector_ = std::make_unique<Supervector<double>>(parts);
  }

  std::unique_ptr<VectorBase<double>> vec1_, vec2_, vec3_, vec4_;
  std::unique_ptr<Supervector<double>> supervector_;
};
// Verifies size() and element read access through both mutable and const
// references; element i of the supervector holds the value i.
TEST_F(SupervectorTest, GetAtIndex) {
  ASSERT_EQ(kLength, supervector_->size());
  const Supervector<double>& const_ref = *supervector_;
  for (int i = 0; i < kLength; ++i) {
    EXPECT_EQ(i, supervector_->GetAtIndex(i));
    EXPECT_EQ(i, const_ref.GetAtIndex(i));
  }
}
// Writes 2*i into each element via the supervector and verifies the values
// both through the supervector view and in the constituent vectors.
TEST_F(SupervectorTest, SetAtIndex) {
  for (int i = 0; i < kLength; ++i) {
    supervector_->SetAtIndex(i, 2 * i);
  }
  for (int i = 0; i < kLength; ++i) {
    EXPECT_EQ(2 * i, supervector_->GetAtIndex(i));
  }

  // Confirm the changes were written through to the constituent vectors.
  // vec1_ covers supervector elements [0, 3].
  for (int i = 0; i < 4; ++i) {
    EXPECT_EQ(2 * i, vec1_->GetAtIndex(i));
  }
  // vec2_ covers elements [4, 5]; vec3_ is empty; vec4_ covers [6, 8].
  for (int i = 0; i < 2; ++i) {
    EXPECT_EQ(2 * (4 + i), vec2_->GetAtIndex(i));
  }
  for (int i = 0; i < 3; ++i) {
    EXPECT_EQ(2 * (6 + i), vec4_->GetAtIndex(i));
  }
}
// Tests that the Supervector can be addressed as an array via operator[].
TEST_F(SupervectorTest, ArrayOperator) {
  // Supervector element 5 lives in vec2_ at local offset 1.
  (*supervector_)[5] = 42;
  EXPECT_EQ(42, (*vec2_)[1]);

  // Const operator[] reads work too.
  const Supervector<double>& const_ref = *supervector_;
  EXPECT_EQ(8, const_ref[8]);
}
// Reads and writes outside [0, kLength) must throw.
TEST_F(SupervectorTest, OutOfRange) {
  EXPECT_THROW(supervector_->SetAtIndex(-1, 0.0), std::exception);
  EXPECT_THROW(supervector_->SetAtIndex(10, 0.0), std::exception);
  EXPECT_THROW(supervector_->GetAtIndex(-1), std::exception);
  EXPECT_THROW(supervector_->GetAtIndex(10), std::exception);
}
// Tests that a supervector can be SetFrom an Eigen vector, and that a
// wrongly-sized source is rejected.
TEST_F(SupervectorTest, SetFromVector) {
  Eigen::VectorXd replacement(kLength);
  replacement << 10, 11, 12, 13, 14, 15, 16, 17, 18;
  supervector_->SetFromVector(replacement);
  EXPECT_EQ(10, supervector_->GetAtIndex(0));
  EXPECT_EQ(11, supervector_->GetAtIndex(1));

  // A source of the wrong size must throw.
  EXPECT_THROW(supervector_->SetFromVector(Eigen::Vector3d::Zero()),
               std::exception);
}
// Tests that a supervector can be SetFrom another VectorBase, and that a
// wrongly-sized source is rejected.
TEST_F(SupervectorTest, SetFrom) {
  const auto source =
      BasicVector<double>::Make({10, 11, 12, 13, 14, 15, 16, 17, 18});
  supervector_->SetFrom(*source);
  EXPECT_EQ(10, supervector_->GetAtIndex(0));
  EXPECT_EQ(11, supervector_->GetAtIndex(1));

  // A source of the wrong size must throw.
  const auto wrong_size = BasicVector<double>::Make(3);
  EXPECT_THROW(supervector_->SetFrom(*wrong_size), std::exception);
}
// A Supervector built from no constituent vectors reports zero size.
TEST_F(SupervectorTest, Empty) {
  const std::vector<VectorBase<double>*> no_parts;
  Supervector<double> empty_supervector(no_parts);
  EXPECT_EQ(0, empty_supervector.size());
}
} // namespace
} // namespace systems
} // namespace drake
| 0 |
/home/johnshepherd/drake/systems/framework | /home/johnshepherd/drake/systems/framework/test/system_visitor_test.cc | #include "drake/systems/framework/system_visitor.h"
#include <memory>
#include <gtest/gtest.h>
#include "drake/systems/framework/diagram.h"
#include "drake/systems/framework/diagram_builder.h"
#include "drake/systems/framework/system.h"
#include "drake/systems/primitives/adder.h"
namespace drake {
namespace systems {
namespace {
typedef std::vector<const System<double>*> Systems;
// Visitor that simply records which systems and which diagrams it was asked
// to visit, for later inspection by the test.
class MyVisitor : public SystemVisitor<double> {
 public:
  void VisitSystem(const System<double>& system) final {
    systems_seen_.push_back(&system);
  }

  void VisitDiagram(const Diagram<double>& diagram) final {
    diagrams_seen_.push_back(&diagram);
  }

  const Systems& visited_systems() const { return systems_seen_; }
  const Systems& visited_diagrams() const { return diagrams_seen_; }

 private:
  Systems systems_seen_;
  Systems diagrams_seen_;
};
// Runs a fresh MyVisitor over `system` via System::Accept and checks that
// exactly the expected systems and diagrams were visited.
void VisitAndCheck(const System<double>& system,
                   const Systems& expected_visited_systems,
                   const Systems& expected_visited_diagrams) {
  MyVisitor visitor;
  system.Accept(&visitor);
  EXPECT_EQ(expected_visited_systems, visitor.visited_systems());
  EXPECT_EQ(expected_visited_diagrams, visitor.visited_diagrams());
}
} // namespace
// Construct a nested diagram and ensure the subcomponents are visited
// correctly.
GTEST_TEST(SystemVisitorTest, NestedDiagram) {
  DiagramBuilder<double> inner_builder;
  const Adder<double>* adder = inner_builder.AddSystem<Adder<double>>(1, 1);
  DiagramBuilder<double> outer_builder;
  const Diagram<double>* inner_diagram =
      outer_builder.AddSystem(inner_builder.Build());
  auto outer_diagram = outer_builder.Build();

  // A leaf system is dispatched to VisitSystem.
  VisitAndCheck(*adder, {adder}, {});
  // A diagram is dispatched to VisitDiagram, not VisitSystem.
  VisitAndCheck(*inner_diagram, {}, {inner_diagram});
  // Confirm that we do NOT recurse into nested diagrams automatically.
  VisitAndCheck(*outer_diagram, {}, {outer_diagram.get()});
}
} // namespace systems
} // namespace drake
| 0 |
/home/johnshepherd/drake/systems/framework | /home/johnshepherd/drake/systems/framework/test/subvector_test.cc | #include "drake/systems/framework/subvector.h"
#include <memory>
#include <Eigen/Dense>
#include <gtest/gtest.h>
#include "drake/common/autodiff.h"
#include "drake/common/test_utilities/eigen_matrix_compare.h"
#include "drake/systems/framework/basic_vector.h"
namespace drake {
namespace systems {
namespace {
const int kSubVectorLength = 2;
// Fixture providing a four-element base vector {1, 2, 3, 4} to slice.
class SubvectorTest : public ::testing::Test {
 protected:
  void SetUp() override {
    vector_ = BasicVector<double>::Make({1, 2, 3, 4});
  }

  std::unique_ptr<VectorBase<double>> vector_;
};
// Constructing a Subvector over a null base vector must throw.
TEST_F(SubvectorTest, NullptrVector) {
  auto make_null_slice = [] { Subvector<double> subvec(nullptr, 0, 0); };
  EXPECT_THROW(make_null_slice(), std::exception);
}
// A slice that extends past the end of the base vector must throw.
TEST_F(SubvectorTest, OutOfBoundsSubvector) {
  auto make_overlong_slice = [this] {
    Subvector<double>(vector_.get(), 1, 4);
  };
  EXPECT_THROW(make_overlong_slice(), std::exception);
}
// Reads through the slice [1, 2] of {1, 2, 3, 4} see the elements {2, 3},
// through both mutable and const references.
TEST_F(SubvectorTest, Access) {
  Subvector<double> slice(vector_.get(), 1, kSubVectorLength);
  EXPECT_EQ(2, slice.GetAtIndex(0));
  EXPECT_EQ(3, slice.GetAtIndex(1));

  const Subvector<double>& const_slice = slice;
  EXPECT_EQ(2, const_slice.GetAtIndex(0));
  EXPECT_EQ(3, const_slice.GetAtIndex(1));
}
// Tests that access out of bounds throws an exception.
TEST_F(SubvectorTest, OutOfRange) {
  Subvector<double> slice(vector_.get(), 1, kSubVectorLength);
  // Writes outside [0, size) throw...
  EXPECT_THROW(slice.SetAtIndex(-1, 0.0), std::exception);
  EXPECT_THROW(slice.SetAtIndex(kSubVectorLength, 0.0), std::exception);
  // ...and so do reads.
  EXPECT_THROW(slice.GetAtIndex(-1), std::exception);
  EXPECT_THROW(slice.GetAtIndex(kSubVectorLength), std::exception);
}
// Tests CopyToVector and CopyToPreSizedVector on a slice.
TEST_F(SubvectorTest, Copy) {
  Subvector<double> slice(vector_.get(), 1, kSubVectorLength);
  const Eigen::Vector2d expected(2, 3);
  EXPECT_EQ(expected, slice.CopyToVector());

  // A correctly-sized destination receives the values.
  Eigen::Vector2d right_size;
  slice.CopyToPreSizedVector(&right_size);
  EXPECT_EQ(expected, right_size);

  // A wrongly-sized destination is rejected.
  Eigen::Vector3d wrong_size;
  EXPECT_THROW(slice.CopyToPreSizedVector(&wrong_size), std::exception);
}
// Tests that writes to the subvector pass through to the sliced vector.
TEST_F(SubvectorTest, Mutation) {
  Subvector<double> slice(vector_.get(), 1, kSubVectorLength);
  Eigen::Vector2d replacement(5, 6);
  slice.SetFromVector(replacement);
  EXPECT_EQ(5, slice.GetAtIndex(0));
  EXPECT_EQ(6, slice.GetAtIndex(1));
  slice.SetAtIndex(1, 42);

  // Only the sliced elements [1, 2] of the base vector changed.
  EXPECT_EQ(1, vector_->GetAtIndex(0));
  EXPECT_EQ(5, vector_->GetAtIndex(1));
  EXPECT_EQ(42, vector_->GetAtIndex(2));
  EXPECT_EQ(4, vector_->GetAtIndex(3));

  // A wrongly-sized source is rejected.
  EXPECT_THROW(slice.SetFromVector(Eigen::Vector3d::Zero()), std::exception);
}
// Tests that a subvector can be SetFrom another VectorBase, and that a
// wrongly-sized source is rejected.
TEST_F(SubvectorTest, SetFrom) {
  Subvector<double> slice(vector_.get(), 1, kSubVectorLength);
  const auto source = BasicVector<double>::Make({7, 8});
  slice.SetFrom(*source);
  EXPECT_EQ(7, slice.GetAtIndex(0));
  EXPECT_EQ(8, slice.GetAtIndex(1));

  const auto wrong_size = BasicVector<double>::Make({1, 2, 3});
  EXPECT_THROW(slice.SetFrom(*wrong_size), std::exception);
}
// Tests that the Subvector can be addressed as an array via operator[].
TEST_F(SubvectorTest, ArrayOperator) {
  Subvector<double> slice(vector_.get(), 1, kSubVectorLength);
  slice[0] = 42;  // Writes through to element 1 of the base vector.
  EXPECT_EQ(42, vector_->GetAtIndex(1));

  const Subvector<double>& const_slice = slice;
  EXPECT_EQ(3, const_slice[1]);
}
// Tests that a VectorBase can be added to a Subvector in place.
TEST_F(SubvectorTest, PlusEq) {
  const BasicVector<double> increment{7, 8};
  Subvector<double> slice(vector_.get(), 1, kSubVectorLength);
  slice += increment;

  // Only the sliced elements [1, 2] were incremented.
  EXPECT_EQ(1, vector_->GetAtIndex(0));
  EXPECT_EQ(9, vector_->GetAtIndex(1));
  EXPECT_EQ(11, vector_->GetAtIndex(2));
  EXPECT_EQ(4, vector_->GetAtIndex(3));
}
// Tests that a Subvector can be scaled and accumulated into an Eigen vector.
TEST_F(SubvectorTest, ScaleAndAddToVector) {
  VectorX<double> accumulator(2);
  accumulator << 100, 1000;
  Subvector<double> slice(vector_.get(), 1, kSubVectorLength);
  slice.ScaleAndAddToVector(1, &accumulator);

  // accumulator + 1 * {2, 3} == {102, 1003}.
  const Eigen::Vector2d expected(102, 1003);
  EXPECT_EQ(expected, accumulator);
}
// TODO(david-german-tri): Once GMock is available in the Drake build, add a
// test case demonstrating that the += operator on Subvector calls
// ScaleAndAddToVector on the addend.
// Adding a wrongly-sized VectorBase to a Subvector must throw.
TEST_F(SubvectorTest, PlusEqInvalidSize) {
  BasicVector<double> too_short(1);
  Subvector<double> slice(vector_.get(), 1, kSubVectorLength);
  EXPECT_THROW(slice += too_short, std::exception);
}
// ScaleAndAddToVector with a wrongly-sized target must throw.
TEST_F(SubvectorTest, AddToVectorInvalidSize) {
  VectorX<double> too_long(3);
  Subvector<double> slice(vector_.get(), 1, kSubVectorLength);
  EXPECT_THROW(slice.ScaleAndAddToVector(1, &too_long), std::exception);
}
// Tests SetZero functionality in VectorBase, applied through a slice.
TEST_F(SubvectorTest, SetZero) {
  Subvector<double> slice(vector_.get(), 0, kSubVectorLength);
  slice.SetZero();
  for (int i = 0; i < slice.size(); ++i) {
    EXPECT_EQ(0, slice.GetAtIndex(i));
  }
}
// Tests all += * operations for VectorBase: PlusEqScaled with one through
// five scaled operands, checking the accumulated result each time.
TEST_F(SubvectorTest, PlusEqScaled) {
  Subvector<double> target(vector_.get(), 0, kSubVectorLength);
  BasicVector<double> basic1{1, 2};
  BasicVector<double> basic2{3, 5};
  BasicVector<double> basic3{7, 11};
  BasicVector<double> basic4{13, 17};
  BasicVector<double> basic5{19, 23};
  VectorBase<double>& v1 = basic1;
  VectorBase<double>& v2 = basic2;
  VectorBase<double>& v3 = basic3;
  VectorBase<double>& v4 = basic4;
  VectorBase<double>& v5 = basic5;

  // Checks both elements of `target` against expected values.
  auto check_target = [&target](double expected0, double expected1) {
    EXPECT_EQ(expected0, target.GetAtIndex(0));
    EXPECT_EQ(expected1, target.GetAtIndex(1));
  };

  target.SetZero();
  target.PlusEqScaled(2, v1);
  check_target(2, 4);

  target.SetZero();
  target.PlusEqScaled({{2, v1}, {3, v2}});
  check_target(11, 19);

  target.SetZero();
  target.PlusEqScaled({{2, v1}, {3, v2}, {5, v3}});
  check_target(46, 74);

  target.SetZero();
  target.PlusEqScaled({{2, v1}, {3, v2}, {5, v3}, {7, v4}});
  check_target(137, 193);

  target.SetZero();
  target.PlusEqScaled({{2, v1}, {3, v2}, {5, v3}, {7, v4}, {11, v5}});
  check_target(346, 446);
}
} // namespace
} // namespace systems
} // namespace drake
| 0 |
/home/johnshepherd/drake/systems/framework | /home/johnshepherd/drake/systems/framework/test/cache_test.cc | #include "drake/systems/framework/cache.h"
#include "drake/common/test_utilities/expect_no_throw.h"
// Tests the Context (runtime) side of caching, which consists of a
// Cache object containing CacheEntryValue objects, each intended to correspond
// to one of a System's CacheEntry objects. User interaction with the cache is
// normally through the CacheEntry objects; we're not attempting to test
// CacheEntry here. Consequently we have to create cache entry values by
// hand here which is clunky.
#include <memory>
#include <stdexcept>
#include <gtest/gtest.h>
#include "drake/common/test_utilities/expect_throws_message.h"
#include "drake/systems/framework/context_base.h"
#include "drake/systems/framework/test_utilities/my_vector.h"
#include "drake/systems/framework/test_utilities/pack_value.h"
using std::string;
using Eigen::Vector3d;
namespace drake {
namespace systems {
namespace {
// This is so we can use the contained cache & dependency graph objects, and
// so we can use the ContextBase cloning infrastructure to clone the cache.
// Concrete ContextBase for these tests: gives us the contained cache and
// dependency-graph objects, plus the ContextBase cloning infrastructure so
// the Clone test below can copy the cache.
class MyContextBase final : public ContextBase {
 public:
  MyContextBase() = default;
  MyContextBase(const MyContextBase&) = default;

 private:
  // ContextBase's cloning hook; a plain copy suffices for this test class.
  std::unique_ptr<ContextBase> DoCloneWithoutPointers() const final {
    return std::make_unique<MyContextBase>(*this);
  }
};
// Dependency chains for cache entries here:
//
// +-----------+ +--------------+
// | time +--->| string_entry +-----------+
// +-+---------+ +--------------+ |
// | |
// | +------+ +------v-------+
// | | xc +------------------------> vector_entry +
// | +--+---+ +--------------+
// | |
// +-v-----v---+ +--------------+ +--------------+
// |all sources+--->| entry0 +----> entry1 +
// +-----------+ +------+-------+ +------+-------+
// | |
// | +------v-------+
// +------------> entry2 +
// +--------------+
//
// The dependencies for all_sources are set up automatically during
// Context construction; the others are set explicitly here.
//
// Value types:
// int: entry0,1,2
// string: string_entry
// MyVector3: vector_entry
// Fixture that populates a Cache by hand with the five entries diagrammed
// above: three int entries (entry0 depends on all_sources; entry1 on entry0;
// entry2 on entry0 and entry1), a string entry depending on time, and a
// MyVector3d entry depending on xc and the string entry. SetUp() leaves
// every entry marked up to date.
class CacheTest : public ::testing::Test {
 protected:
  void SetUp() override {
    // Manually dole out tickets and cache indexes. NOTE: the order of these
    // allocations matters; tests below continue from next_ticket_ and
    // next_cache_index_.
    index0_ = next_cache_index_++;
    cache().CreateNewCacheEntryValue(index0_, next_ticket_++, "entry0",
                                     {all_sources_ticket_}, &graph());
    cache_value(index0_).SetInitialValue(PackValue(0));

    index1_ = next_cache_index_++;
    cache().CreateNewCacheEntryValue(index1_, next_ticket_++, "entry1",
                                     {cache_value(index0_).ticket()},
                                     &graph());
    cache_value(index1_).SetInitialValue(PackValue(1));

    index2_ = next_cache_index_++;
    cache().CreateNewCacheEntryValue(
        index2_, next_ticket_++, "entry2",
        {cache_value(index0_).ticket(), cache_value(index1_).ticket()},
        &graph());
    cache_value(index2_).SetInitialValue(PackValue(2));

    // Make the initial values up to date.
    cache_value(index0_).mark_up_to_date();
    cache_value(index1_).mark_up_to_date();
    cache_value(index2_).mark_up_to_date();

    string_index_ = next_cache_index_++;
    cache().CreateNewCacheEntryValue(string_index_, next_ticket_++,
                                     "string thing", {time_ticket_},
                                     &graph());
    cache_value(string_index_)
        .SetInitialValue(AbstractValue::Make<string>("initial"));
    // SetInitialValue() leaves the entry out of date.
    EXPECT_TRUE(cache_value(string_index_).is_out_of_date());
    cache_value(string_index_).mark_up_to_date();

    vector_index_ = next_cache_index_++;
    cache().CreateNewCacheEntryValue(
        vector_index_, next_ticket_++, "vector thing",
        {xc_ticket_, cache_value(string_index_).ticket()}, &graph());
    const MyVector3d my_vec(Vector3d(0, 0, 0));
    cache_value(vector_index_)
        .SetInitialValue(AbstractValue::Make<MyVector3d>(my_vec));
    EXPECT_TRUE(cache_value(vector_index_).is_out_of_date());
    // set_value() should mark this up to date as a side effect.
    cache_value(vector_index_).set_value(MyVector3d(Vector3d(99., 98., 97.)));

    // Everything should be up to date to start out.
    EXPECT_FALSE(cache_value(index0_).is_out_of_date());
    EXPECT_FALSE(cache_value(index1_).is_out_of_date());
    EXPECT_FALSE(cache_value(index2_).is_out_of_date());
    EXPECT_FALSE(cache_value(string_index_).is_out_of_date());
    EXPECT_FALSE(cache_value(vector_index_).is_out_of_date());
  }

  // Some sugar methods to shorten the calls. These default to the local
  // Context but work with a given Context if needed.

  // Returns the mutable CacheEntryValue at `index` in the given cache
  // (default: this fixture's context's cache).
  CacheEntryValue& cache_value(CacheIndex index, Cache* cache_ptr = nullptr) {
    if (!cache_ptr) cache_ptr = &cache();
    return cache_ptr->get_mutable_cache_entry_value(index);
  }

  // Returns the DependencyTracker that manages the cache entry at `index`.
  DependencyTracker& tracker(CacheIndex index,
                             ContextBase* context_ptr = nullptr) {
    if (!context_ptr) context_ptr = &context_;
    return tracker(
        cache_value(index, &context_ptr->get_mutable_cache()).ticket(),
        &*context_ptr);
  }

  Cache& cache(ContextBase* context_ptr = nullptr) {
    if (!context_ptr) context_ptr = &context_;
    return context_ptr->get_mutable_cache();
  }

  DependencyGraph& graph(ContextBase* context_ptr = nullptr) {
    if (!context_ptr) context_ptr = &context_;
    return context_ptr->get_mutable_dependency_graph();
  }

  // Returns the DependencyTracker for an arbitrary ticket.
  DependencyTracker& tracker(DependencyTicket ticket,
                             ContextBase* context_ptr = nullptr) {
    if (!context_ptr) context_ptr = &context_;
    return graph(&*context_ptr).get_mutable_tracker(ticket);
  }

  std::unique_ptr<MyContextBase> context_ptr_ =
      std::make_unique<MyContextBase>();
  MyContextBase& context_ = *context_ptr_;

  // Indexes of the entries created in SetUp().
  CacheIndex index0_, index1_, index2_;
  CacheIndex string_index_, vector_index_;

  // Well-known tickets that exist after Context construction.
  const DependencyTicket nothing_ticket_{internal::kNothingTicket};
  const DependencyTicket time_ticket_{internal::kTimeTicket};
  const DependencyTicket z_ticket_{internal::kZTicket};
  const DependencyTicket xc_ticket_{internal::kXcTicket};
  const DependencyTicket xcdot_ticket_{internal::kXcdotTicket};
  const DependencyTicket all_sources_ticket_{internal::kAllSourcesTicket};

  // Next ticket and cache index to hand out when creating more entries.
  DependencyTicket next_ticket_{internal::kNextAvailableTicket};
  CacheIndex next_cache_index_{cache().cache_size()};
};
// Normally creating a new CacheEntryValue creates a new DependencyTracker to
// manage it. However, for well-known cached objects like the time derivatives
// cache entry xcdot, the tracker is created during Context construction and
// we are allowed to associate the cache entry value to it later.
TEST_F(CacheTest, CanAssociateExistingTrackerWithNewCacheEntry) {
  // Check that creating a new cache entry value creates the matching tracker.
  int expected_num_trackers = graph().trackers_size();
  CacheIndex index(next_cache_index_++);
  DependencyTicket ticket(next_ticket_++);
  EXPECT_FALSE(graph().has_tracker(ticket));
  CacheEntryValue& normal_value = cache().CreateNewCacheEntryValue(
      index, ticket, "normal cache entry", {nothing_ticket_}, &graph());
  ++expected_num_trackers;
  EXPECT_EQ(graph().trackers_size(), expected_num_trackers);
  ASSERT_TRUE(graph().has_tracker(ticket));
  // The freshly-created tracker refers back to the new cache entry value.
  EXPECT_EQ(graph().get_tracker(ticket).cache_entry_value(), &normal_value);

  // Now check that we can attach a new cache entry value to an existing
  // well-known tracker.
  ASSERT_TRUE(graph().has_tracker(xcdot_ticket_));
  const DependencyTracker& xcdot_tracker = graph().get_tracker(xcdot_ticket_);
  // The pre-existing tracker has no cache entry value associated yet.
  EXPECT_EQ(xcdot_tracker.cache_entry_value(), nullptr);
  index = next_cache_index_++;
  CacheEntryValue& xcdot_value = cache().CreateNewCacheEntryValue(
      index, xcdot_ticket_, "xcdot cache entry", {nothing_ticket_}, &graph());
  // No new tracker should have been created.
  EXPECT_EQ(graph().trackers_size(), expected_num_trackers);
  EXPECT_EQ(graph().get_tracker(xcdot_ticket_).cache_entry_value(),
            &xcdot_value);
}
// Check that SetInitialValue works and fails properly.
TEST_F(CacheTest, SetInitialValueWorks) {
  // A value provided at construction should be retrievable. (The other
  // entries' initial values are checked elsewhere.)
  EXPECT_EQ(2, cache_value(index2_).GetValueOrThrow<int>());

  // Providing a second initial value is rejected, leaving the first intact.
  EXPECT_THROW(cache_value(index2_).SetInitialValue(PackValue(5)),
               std::logic_error);
  EXPECT_EQ(2, cache_value(index2_).GetValueOrThrow<int>());

  // A null initial value is rejected and leaves the entry empty.
  const CacheIndex fresh_index(next_cache_index_++);
  CacheEntryValue& fresh_value = cache().CreateNewCacheEntryValue(
      fresh_index, next_ticket_++, "null value", {nothing_ticket_}, &graph());
  EXPECT_FALSE(fresh_value.has_value());
  EXPECT_THROW(cache_value(fresh_index)
                   .SetInitialValue(std::unique_ptr<AbstractValue>()),
               std::logic_error);
  EXPECT_FALSE(fresh_value.has_value());
}
// Check that a chain of dependent cache entries gets invalidated properly.
// More extensive testing of dependency tracking is in the unit test for
// dependency trackers.
TEST_F(CacheTest, InvalidationWorks) {
  // All entries start up to date; a time change propagates (directly or
  // transitively) to every one of them.
  tracker(time_ticket_).NoteValueChange(99);
  for (const CacheIndex index :
       {index0_, index1_, index2_, string_index_, vector_index_}) {
    EXPECT_TRUE(cache_value(index).is_out_of_date());
  }
}
// Make sure the debugging routine that invalidates everything works.
TEST_F(CacheTest, InvalidateAllWorks) {
  // All entries start up to date; this should invalidate every one of them.
  context_.SetAllCacheEntriesOutOfDate();
  for (const CacheIndex index :
       {index0_, index1_, index2_, string_index_, vector_index_}) {
    EXPECT_TRUE(cache_value(index).is_out_of_date());
  }
}
// Make sure the debugging routines to disable and re-enable caching work, and
// are independent of the out-of-date flags.
TEST_F(CacheTest, DisableCachingWorks) {
  CacheEntryValue& int_val = cache_value(index1_);
  CacheEntryValue& str_val = cache_value(string_index_);
  CacheEntryValue& vec_val = cache_value(vector_index_);

  // Everything starts out up to date. Memorize serial numbers.
  int64_t ser_int = int_val.serial_number();
  int64_t ser_str = str_val.serial_number();
  int64_t ser_vec = vec_val.serial_number();
  EXPECT_FALSE(int_val.needs_recomputation());
  EXPECT_FALSE(str_val.needs_recomputation());
  EXPECT_FALSE(vec_val.needs_recomputation());

  context_.DisableCaching();
  EXPECT_TRUE(int_val.is_cache_entry_disabled());  // Just check one.
  // The out_of_date flag shouldn't be affected, but now we need recomputation.
  EXPECT_FALSE(int_val.is_out_of_date());
  EXPECT_FALSE(str_val.is_out_of_date());
  EXPECT_FALSE(vec_val.is_out_of_date());
  EXPECT_TRUE(int_val.needs_recomputation());
  EXPECT_TRUE(str_val.needs_recomputation());
  EXPECT_TRUE(vec_val.needs_recomputation());

  // Flags are still supposed to be functioning while caching is disabled,
  // even though they are mostly ignored. (The Get() method still depends
  // on them.)
  int_val.mark_out_of_date();
  str_val.mark_out_of_date();
  vec_val.mark_out_of_date();

  // Setting new values should mark the entries up to date and bump each
  // serial number by one.
  int_val.SetValueOrThrow(101);
  str_val.SetValueOrThrow(string("hello there"));
  cache_value(vector_index_).SetValueOrThrow(MyVector3d(Vector3d(4., 5., 6.)));
  EXPECT_FALSE(int_val.is_out_of_date());
  EXPECT_FALSE(str_val.is_out_of_date());
  EXPECT_FALSE(vec_val.is_out_of_date());
  ++ser_int; ++ser_str; ++ser_vec;
  EXPECT_EQ(int_val.serial_number(), ser_int);
  EXPECT_EQ(str_val.serial_number(), ser_str);
  EXPECT_EQ(vec_val.serial_number(), ser_vec);
  EXPECT_EQ(int_val.get_value<int>(), 101);
  EXPECT_EQ(str_val.get_value<string>(), "hello there");
  EXPECT_EQ(vec_val.get_value<MyVector3d>().get_value(),
            Vector3d(4., 5., 6.));

  // Should still need recomputation even though we just did it, because
  // caching is still disabled.
  EXPECT_TRUE(int_val.needs_recomputation());
  EXPECT_TRUE(str_val.needs_recomputation());
  EXPECT_TRUE(vec_val.needs_recomputation());

  // Now re-enable caching and verify that it works.
  context_.EnableCaching();
  EXPECT_FALSE(int_val.is_cache_entry_disabled());  // Just check one.
  // Since the out_of_date flag was still functioning with caching disabled,
  // we don't need to recompute now.
  EXPECT_FALSE(int_val.needs_recomputation());
  EXPECT_FALSE(str_val.needs_recomputation());
  EXPECT_FALSE(vec_val.needs_recomputation());
  // And we can still grab the previously-computed values.
  EXPECT_EQ(int_val.get_value<int>(), 101);
  EXPECT_EQ(str_val.get_value<string>(), "hello there");
  EXPECT_EQ(vec_val.get_value<MyVector3d>().get_value(),
            Vector3d(4., 5., 6.));

  // Blanket forced recomputation should work though.
  context_.SetAllCacheEntriesOutOfDate();
  EXPECT_TRUE(int_val.needs_recomputation());
  EXPECT_TRUE(str_val.needs_recomputation());
  EXPECT_TRUE(vec_val.needs_recomputation());
  EXPECT_TRUE(int_val.is_out_of_date());
  EXPECT_TRUE(str_val.is_out_of_date());
  EXPECT_TRUE(vec_val.is_out_of_date());
}
// Freezing the cache should prevent mutable access to any out-of-date value.
// These are the mutable methods:
//   SetValueOrThrow<T>()
//   set_value<V>()
//   GetMutableAbstractValueOrThrow()
//   GetMutableValueOrThrow<V>()
//   swap_value()
TEST_F(CacheTest, FreezeUnfreezeWork) {
  // Test that the flag gets set and reset.
  EXPECT_FALSE(context_.is_cache_frozen());
  context_.FreezeCache();
  EXPECT_TRUE(context_.is_cache_frozen());
  context_.UnfreezeCache();
  EXPECT_FALSE(context_.is_cache_frozen());

  CacheEntryValue& str_val = cache_value(string_index_);
  EXPECT_FALSE(str_val.is_out_of_date());

  // With the cache unfrozen, all mutable methods should be OK here as long
  // as the entry is out of date.
  str_val.mark_out_of_date();
  DRAKE_EXPECT_NO_THROW(str_val.SetValueOrThrow<std::string>("one"));
  str_val.mark_out_of_date();
  DRAKE_EXPECT_NO_THROW(str_val.set_value<std::string>("two"));
  str_val.mark_out_of_date();
  // The next two leave the entry out of date.
  DRAKE_EXPECT_NO_THROW(str_val.GetMutableAbstractValueOrThrow());
  DRAKE_EXPECT_NO_THROW(str_val.GetMutableValueOrThrow<std::string>());
  auto swapper = AbstractValue::Make<std::string>("for swapping");
  DRAKE_EXPECT_NO_THROW(str_val.swap_value(&swapper));

  // With cache frozen but up to date, check some const methods to make sure
  // they still work.
  str_val.mark_up_to_date();
  context_.FreezeCache();
  DRAKE_EXPECT_NO_THROW(str_val.GetAbstractValueOrThrow());
  DRAKE_EXPECT_NO_THROW(str_val.GetValueOrThrow<std::string>());
  DRAKE_EXPECT_NO_THROW(str_val.get_value<std::string>());

  // Const methods still fail if entry is out of date (just check one).
  str_val.mark_out_of_date();
  DRAKE_EXPECT_THROWS_MESSAGE(str_val.GetValueOrThrow<std::string>(),
                              ".*string thing.*GetValueOrThrow.*out of date.*");

  // But, all mutable methods should fail now. "Set" methods should leave the
  // cache entry out of date.
  DRAKE_EXPECT_THROWS_MESSAGE(
      str_val.SetValueOrThrow<std::string>("three"),
      ".*string thing.*SetValueOrThrow.*cache is frozen.*");
  // (Despite the snake_case name, this still checks for frozen cache.)
  DRAKE_EXPECT_THROWS_MESSAGE(str_val.set_value<std::string>("four"),
                              ".*string thing.*set_value.*cache is frozen.*");
  DRAKE_EXPECT_THROWS_MESSAGE(
      str_val.GetMutableAbstractValueOrThrow(),
      ".*string thing.*GetMutableAbstractValueOrThrow.*cache is frozen.*");
  DRAKE_EXPECT_THROWS_MESSAGE(
      str_val.GetMutableValueOrThrow<std::string>(),
      ".*string thing.*GetMutableValueOrThrow.*cache is frozen.*");
  DRAKE_EXPECT_THROWS_MESSAGE(
      str_val.swap_value(&swapper),
      ".*string thing.*swap_value.*cache is frozen.*");
}
// Test that the vector-valued cache entry works and preserved the underlying
// concrete type.
TEST_F(CacheTest, VectorCacheEntryWorks) {
  CacheEntryValue& vector_entry = cache_value(vector_index_);
  // The fixture's SetUp() left this entry up to date.
  EXPECT_FALSE(vector_entry.is_out_of_date());
  EXPECT_EQ(Vector3d(99., 98., 97.),
            vector_entry.get_value<MyVector3d>().get_value());

  // Invalidate by pretending we modified a z, which should invalidate this
  // xc-dependent cache entry.
  tracker(z_ticket_).NoteValueChange(1001);
  EXPECT_TRUE(vector_entry.is_out_of_date());
  vector_entry.set_value(MyVector3d(Vector3d(3., 2., 1.)));
  EXPECT_FALSE(vector_entry.is_out_of_date());
  EXPECT_EQ(Vector3d(3., 2., 1.),
            vector_entry.get_value<MyVector3d>().get_value());
  // TODO(sherm1) Value<MyVector3d> treats the vector as non-assignable so
  // we can't insist here that the second get_value() returns the same
  // object as the first.
}
// Test that we can swap in a new value if it has the right type, and that
// the entry is invalid immediately after swapping. In Debug, test that we
// throw if the swapped-in value is null or has the wrong type.
TEST_F(CacheTest, CanSwapValue) {
  CacheEntryValue& string_entry = cache_value(string_index_);
  EXPECT_FALSE(string_entry.is_out_of_date());  // Set to "initial".
  EXPECT_EQ("initial", string_entry.get_value<string>());

  auto replacement = AbstractValue::Make<string>("new value");
  string_entry.swap_value(&replacement);
  // The old value came back in `replacement`; the entry was invalidated.
  EXPECT_EQ("initial", replacement->get_value<string>());
  EXPECT_TRUE(string_entry.is_out_of_date());
  string_entry.mark_up_to_date();
  EXPECT_EQ("new value", string_entry.get_value<string>());

  // In Debug builds, try a bad swap and expect it to be caught.
  if (kDrakeAssertIsArmed) {
    std::unique_ptr<AbstractValue> null_replacement;
    EXPECT_THROW(string_entry.swap_value(&null_replacement), std::logic_error);
    auto wrong_type = AbstractValue::Make<int>(29);
    EXPECT_THROW(string_entry.swap_value(&wrong_type), std::logic_error);
  }
}
// An invalidation of entry1 must propagate downstream to entry2 while
// leaving the upstream entry0 untouched.
TEST_F(CacheTest, InvalidationIsRecursive) {
  cache_value(index1_).mark_out_of_date();
  tracker(index1_).NoteValueChange(100);  // Arbitrary unique change event.

  // entry0 is upstream of entry1 and remains valid.
  EXPECT_EQ(0, cache_value(index0_).get_value<int>());
  EXPECT_EQ(0, cache_value(index0_).GetValueOrThrow<int>());
  // entry1 and its dependent entry2 are now out of date.
  EXPECT_TRUE(cache_value(index1_).is_out_of_date());
  EXPECT_TRUE(cache_value(index2_).is_out_of_date());
}
// Clones the context and verifies that the cache (including empty slots),
// the stored values, and the dependency trackers were deep-copied and are
// fully independent of the original.
TEST_F(CacheTest, Clone) {
  // Make up a cache index that is guaranteed to leave a gap to make sure
  // we test handling of missing entries properly.
  next_cache_index_ += 3;
  CacheIndex last_index(next_cache_index_++);
  cache().CreateNewCacheEntryValue(last_index, next_ticket_++,
                                   "last entry", {nothing_ticket_},
                                   &graph());
  cache_value(last_index).SetInitialValue(PackValue(42));
  EXPECT_TRUE(cache_value(last_index).is_out_of_date());
  cache_value(last_index).mark_up_to_date();

  // Create a clone of the cache and dependency graph.
  auto clone_context_ptr = context_.Clone();
  MyContextBase& clone_context =
      dynamic_cast<MyContextBase&>(*clone_context_ptr);
  Cache& clone_cache = cache(&clone_context);

  // Now study the copied cache to see if it got copied correctly.
  // The copy should have the same size as the original, including empty
  // slots. Can't go on if this fails so ASSERT.
  ASSERT_EQ(clone_cache.cache_size(), cache().cache_size());
  for (CacheIndex index(0); index < cache().cache_size(); ++index) {
    EXPECT_EQ(cache().has_cache_entry_value(index),
              clone_cache.has_cache_entry_value(index));
    if (!cache().has_cache_entry_value(index)) continue;
    const CacheEntryValue& value = cache().get_cache_entry_value(index);
    const CacheEntryValue& clone_value =
        clone_cache.get_cache_entry_value(index);
    // Cloned entry is a distinct object...
    EXPECT_NE(&clone_value, &value);

    // Test that the new cache entry is valid and is owned by the new context.
    // This is also a unit test for ThrowIfBadCacheEntryValue().
    DRAKE_EXPECT_NO_THROW(value.ThrowIfBadCacheEntryValue(&context_));
    DRAKE_EXPECT_NO_THROW(
        clone_value.ThrowIfBadCacheEntryValue(&clone_context));
    EXPECT_THROW(clone_value.ThrowIfBadCacheEntryValue(&context_),
                 std::logic_error);

    // ...whose metadata matches the original's.
    EXPECT_EQ(clone_value.description(), value.description());
    EXPECT_EQ(clone_value.has_value(), value.has_value());
    EXPECT_EQ(clone_value.cache_index(), value.cache_index());
    EXPECT_EQ(clone_value.ticket(), value.ticket());
    EXPECT_EQ(clone_value.serial_number(), value.serial_number());

    // If there is a value, the clone_cache should not have the same memory
    // address.
    if (value.has_value()) {
      EXPECT_NE(&clone_value.get_abstract_value(),
                &value.get_abstract_value());
    }

    // Make sure the tracker got copied and that the new one refers to the
    // new cache entry, not the old one. OTOH the ticket should be unchanged.
    const DependencyTracker& value_tracker = tracker(value.ticket());
    const DependencyTracker& clone_value_tracker =
        tracker(value.ticket(), &clone_context);
    DRAKE_EXPECT_NO_THROW(
        value_tracker.ThrowIfBadDependencyTracker(&context_, &value));
    DRAKE_EXPECT_NO_THROW(clone_value_tracker.ThrowIfBadDependencyTracker(
        &clone_context, &clone_value));
    EXPECT_EQ(value_tracker.ticket(), value.ticket());
    EXPECT_EQ(clone_value_tracker.ticket(), value.ticket());
  }

  // The clone_cache should have the same values.
  EXPECT_EQ(cache_value(index0_, &clone_cache).GetValueOrThrow<int>(),
            cache_value(index0_).GetValueOrThrow<int>());
  EXPECT_EQ(cache_value(index1_, &clone_cache).GetValueOrThrow<int>(),
            cache_value(index1_).GetValueOrThrow<int>());
  EXPECT_EQ(cache_value(index2_, &clone_cache).GetValueOrThrow<int>(),
            cache_value(index2_).GetValueOrThrow<int>());
  EXPECT_EQ(cache_value(string_index_, &clone_cache).GetValueOrThrow<string>(),
            cache_value(string_index_).GetValueOrThrow<string>());
  EXPECT_EQ(cache_value(vector_index_,
                        &clone_cache).GetValueOrThrow<MyVector3d>()
                .get_value(),
            cache_value(vector_index_).GetValueOrThrow<MyVector3d>()
                .get_value());
  EXPECT_EQ(cache_value(last_index, &clone_cache).GetValueOrThrow<int>(),
            cache_value(last_index).GetValueOrThrow<int>());

  // Changes to the clone_cache should not affect the original.
  cache_value(index2_, &clone_cache).mark_out_of_date();  // Invalidate.
  cache_value(index2_,
              &clone_cache).set_value<int>(99);  // Set new value & validate.
  EXPECT_EQ(cache_value(index2_, &clone_cache).get_value<int>(), 99);
  EXPECT_EQ(cache_value(index2_).get_value<int>(), 2);

  // This should invalidate everything in the original cache, but nothing
  // in the clone_cache. Just check one entry as representative.
  tracker(time_ticket_).NoteValueChange(10);
  EXPECT_TRUE(cache_value(string_index_).is_out_of_date());
  EXPECT_FALSE(cache_value(string_index_, &clone_cache).is_out_of_date());

  // Try an invalidation in the clone_cache to make sure the dependency
  // graph is operational there. The string entry depends on time (not xc)
  // so it stays valid; the vector entry and the all_sources-dependent int
  // entries are invalidated.
  tracker(xc_ticket_, &clone_context).NoteValueChange(10);
  EXPECT_FALSE(cache_value(string_index_, &clone_cache).is_out_of_date());
  EXPECT_TRUE(cache_value(vector_index_, &clone_cache).is_out_of_date());
  EXPECT_TRUE(cache_value(index0_, &clone_cache).is_out_of_date());
  EXPECT_TRUE(cache_value(index1_, &clone_cache).is_out_of_date());
  EXPECT_TRUE(cache_value(index2_, &clone_cache).is_out_of_date());
}
// Test that the Get(), Set(), GetMutable() and Peek() methods work and catch
// errors appropriately. (This test is at the end because it throws so many
// exceptions that it is hard to run through in the debugger.)
TEST_F(CacheTest, ValueMethodsWork) {
  // Create a fresh cache entry whose only prerequisite is the "nothing"
  // tracker, so we fully control its up-to-date status in this test.
  CacheIndex index(next_cache_index_++);
  cache().CreateNewCacheEntryValue(index, next_ticket_++,
                                   "get test", {nothing_ticket_},
                                   &graph());
  CacheEntryValue& value = cache_value(index);
  EXPECT_EQ(value.cache_index(), index);
  EXPECT_EQ(value.ticket(), next_ticket_-1);
  EXPECT_EQ(value.description(), "get test");
  // A spare value used below to exercise swap_value().
  auto swap_with_me = AbstractValue::Make<int>(29);
  // There is currently no value stored in the new entry. All "throw" methods
  // should fail, and fast methods should fail in Debug builds.
  EXPECT_THROW(value.GetAbstractValueOrThrow(), std::logic_error);
  EXPECT_THROW(value.GetValueOrThrow<int>(), std::logic_error);
  EXPECT_THROW(value.SetValueOrThrow<int>(5), std::logic_error);
  EXPECT_THROW(value.GetMutableAbstractValueOrThrow(), std::logic_error);
  EXPECT_THROW(value.GetMutableValueOrThrow<int>(), std::logic_error);
  EXPECT_THROW(value.PeekAbstractValueOrThrow(), std::logic_error);
  EXPECT_THROW(value.PeekValueOrThrow<int>(), std::logic_error);
  if (kDrakeAssertIsArmed) {
    EXPECT_THROW(value.get_abstract_value(), std::logic_error);
    EXPECT_THROW(value.get_value<int>(), std::logic_error);
    EXPECT_THROW(value.set_value<int>(5), std::logic_error);
    EXPECT_THROW(value.is_out_of_date(), std::logic_error);
    EXPECT_THROW(value.needs_recomputation(), std::logic_error);
    EXPECT_THROW(value.mark_up_to_date(), std::logic_error);
    EXPECT_THROW(value.swap_value(&swap_with_me), std::logic_error);
  }
  // Now provide an initial value (not yet up to date).
  value.SetInitialValue(PackValue(42));
  // Nope, only allowed once.
  EXPECT_THROW(value.SetInitialValue(PackValue(42)), std::logic_error);
  EXPECT_TRUE(value.is_out_of_date());  // Initial value is not up to date.
  // "Get" methods should fail, "GetMutable" and "Peek" succeed.
  EXPECT_THROW(value.GetValueOrThrow<int>(), std::logic_error);
  EXPECT_THROW(value.GetAbstractValueOrThrow(), std::logic_error);
  DRAKE_EXPECT_NO_THROW(value.GetMutableValueOrThrow<int>());
  DRAKE_EXPECT_NO_THROW(value.GetMutableAbstractValueOrThrow());
  DRAKE_EXPECT_NO_THROW(value.PeekValueOrThrow<int>());
  DRAKE_EXPECT_NO_THROW(value.PeekAbstractValueOrThrow());
  // The fast "get" methods must check for up to date in Debug builds.
  if (kDrakeAssertIsArmed) {
    EXPECT_THROW(value.get_value<int>(), std::logic_error);
    EXPECT_THROW(value.get_abstract_value(), std::logic_error);
  }
  // Swap doesn't care about up to date or not, but always marks the swapped-in
  // value out of date.
  value.swap_value(&swap_with_me);
  EXPECT_TRUE(value.is_out_of_date());  // Still out of date.
  EXPECT_EQ(value.PeekValueOrThrow<int>(), 29);
  EXPECT_EQ(swap_with_me->get_value<int>(), 42);
  // Mutable access and Set both work while the entry is out of date.
  value.GetMutableValueOrThrow<int>() = 43;
  EXPECT_EQ(value.PeekValueOrThrow<int>(), 43);
  value.SetValueOrThrow<int>(44);  // Check non-throw functioning.
  EXPECT_EQ(value.PeekValueOrThrow<int>(), 44);
  // Next, mark this up to date and check behavior. Now "Get" and "Peek"
  // methods should succeed but "GetMutable" and "Set" methods should fail.
  value.mark_up_to_date();
  DRAKE_EXPECT_NO_THROW(value.get_abstract_value());
  DRAKE_EXPECT_NO_THROW(value.get_value<int>());
  DRAKE_EXPECT_NO_THROW(value.GetValueOrThrow<int>());
  DRAKE_EXPECT_NO_THROW(value.GetAbstractValueOrThrow());
  DRAKE_EXPECT_NO_THROW(value.PeekValueOrThrow<int>());
  DRAKE_EXPECT_NO_THROW(value.PeekAbstractValueOrThrow());
  EXPECT_THROW(value.GetMutableValueOrThrow<int>(), std::logic_error);
  EXPECT_THROW(value.GetMutableAbstractValueOrThrow(), std::logic_error);
  EXPECT_THROW(value.SetValueOrThrow<int>(5), std::logic_error);
  // The fast "set" method must check for up to date in Debug builds.
  if (kDrakeAssertIsArmed) {
    EXPECT_THROW(value.set_value<int>(5), std::logic_error);
  }
  // And "swap" still doesn't care about up to date on entry.
  value.swap_value(&swap_with_me);
  EXPECT_TRUE(value.is_out_of_date());  // Should have changed.
  EXPECT_EQ(value.PeekValueOrThrow<int>(), 42);
  EXPECT_EQ(swap_with_me->get_value<int>(), 44);
  value.mark_up_to_date();
  // Get the same value as concrete or abstract type.
  EXPECT_EQ(42, value.get_value<int>());
  const AbstractValue& abstract_value =
      cache().get_cache_entry_value(index).GetAbstractValueOrThrow();
  EXPECT_EQ(42, UnpackIntValue(abstract_value));
  // The string entry was created by the fixture with value "initial".
  CacheEntryValue& string_value = cache_value(string_index_);
  EXPECT_EQ(string_value.GetValueOrThrow<string>(), "initial");
  // This is the wrong value type.
  EXPECT_THROW(string_value.GetValueOrThrow<int>(), std::logic_error);
}
} // namespace
} // namespace systems
} // namespace drake
| 0 |
/home/johnshepherd/drake/systems/framework | /home/johnshepherd/drake/systems/framework/test/output_port_test.cc | #include "drake/common/test_utilities/expect_no_throw.h"
/* clang-format off to disable clang-format-includes */
#include "drake/systems/framework/diagram.h"
#include "drake/systems/framework/leaf_output_port.h"
#include "drake/systems/framework/output_port.h"
/* clang-format on */
#include <memory>
#include <stdexcept>
#include <Eigen/Dense>
#include <gtest/gtest.h>
#include "drake/common/never_destroyed.h"
#include "drake/common/test_utilities/expect_throws_message.h"
#include "drake/systems/framework/basic_vector.h"
#include "drake/systems/framework/context.h"
#include "drake/systems/framework/diagram_builder.h"
#include "drake/systems/framework/leaf_system.h"
#include "drake/systems/framework/system.h"
#include "drake/systems/framework/system_output.h"
#include "drake/systems/framework/test_utilities/my_vector.h"
namespace drake {
namespace systems {
namespace {
using Eigen::Vector2d;
using Eigen::Vector3d;
using std::string;
using std::unique_ptr;
using std::make_unique;
// A hollow shell of a System.
class DummySystem : public LeafSystem<double> {
 public:
  // Use defaulted special members rather than empty-brace bodies
  // (clang-tidy: modernize-use-equals-default).
  DummySystem() = default;
  ~DummySystem() override = default;

  // Expose these protected SystemBase methods for use by the tests below.
  using SystemBase::DeclareCacheEntry;
  using SystemBase::assign_next_dependency_ticket;
  using SystemBase::get_system_id;
};
// The only concrete output ports we expect to encounter are LeafOutputPort
// and DiagramOutputPort. Those are well behaved so don't trigger some of the
// base class error messages. Hence this feeble port. Derived classes will
// introduce errors.
class MyOutputPort : public OutputPort<double> {
 public:
  // Constructs a 2-element vector-valued port named "my_output" on `dummy`.
  MyOutputPort(DummySystem* dummy, OutputPortIndex index,
               DependencyTicket ticket)
      : OutputPort<double>(dummy, dummy, dummy->get_system_id(),
                           "my_output", index, ticket, kVectorValued, 2) {}

  // Allocates a MyVector2d initialized to (1, 2), stored as a BasicVector.
  std::unique_ptr<AbstractValue> DoAllocate() const override {
    return AbstractValue::Make<BasicVector<double>>(
        MyVector2d(Vector2d(1., 2.)));
  }

  // Overwrites `value` with (3, 4); requires that it already holds a
  // BasicVector-compatible value.
  void DoCalc(const Context<double>&, AbstractValue* value) const override {
    EXPECT_NE(value, nullptr);
    DRAKE_EXPECT_NO_THROW(
        value->set_value<BasicVector<double>>(MyVector2d(Vector2d(3., 4.))));
  }

  const AbstractValue& DoEval(const Context<double>& context) const override {
    // This is a fake cache entry for Eval to "update".
    static never_destroyed<std::unique_ptr<AbstractValue>> temp(Allocate());
    Calc(context, temp.access().get());
    return *temp.access();
  }

  internal::OutputPortPrerequisite DoGetPrerequisite() const override {
    ADD_FAILURE() << "We won't call this.";
    return {};
  }

  // Rejects proposed values whose concrete type differs from what
  // DoAllocate() produces.
  void ThrowIfInvalidPortValueType(
      const Context<double>&,
      const AbstractValue& proposed_value) const final {
    // Note: this is a very expensive way to check -- fine for this test
    // case (which has no alternative) but don't copy into real code!
    auto good_value = Allocate();
    if (proposed_value.type_info() != good_value->type_info()) {
      throw std::logic_error(fmt::format(
          "OutputPort::Calc(): expected output type {} "
          "but got {} for {}.",
          good_value->GetNiceTypeName(), proposed_value.GetNiceTypeName(),
          PortBase::GetFullDescription()));
    }
  }
};
class MyStringAllocatorPort : public MyOutputPort {
public:
using MyOutputPort::MyOutputPort;
// Allocator returns string but should have returned BasicVector.
std::unique_ptr<AbstractValue> DoAllocate() const override {
return AbstractValue::Make<std::string>("hello");
}
};
class MyBadSizeAllocatorPort : public MyOutputPort {
public:
using MyOutputPort::MyOutputPort;
// Allocator returns 3-element vector but should have returned 2-element.
std::unique_ptr<AbstractValue> DoAllocate() const override {
return AbstractValue::Make<BasicVector<double>>(
MyVector3d(Vector3d(1., 2., 3.)));
}
};
class MyNullAllocatorPort : public MyOutputPort {
 public:
  using MyOutputPort::MyOutputPort;

  // Deliberately wrong: produces no value at all.
  std::unique_ptr<AbstractValue> DoAllocate() const override {
    return std::unique_ptr<AbstractValue>{};
  }
};
// These "bad allocator" messages are likely to be caught earlier by
// LeafOutputPort when it delegates to cache entries. These base class messages
// may issue in case of (a) future addition of uncached concrete output ports,
// or (b) implementation changes to existing ports or error handling in caching.
GTEST_TEST(TestBaseClass, BadAllocators) {
  DummySystem dummy;
  // Each port gets its own index and dependency ticket on the dummy system.
  MyStringAllocatorPort string_allocator{&dummy, OutputPortIndex(0),
                                         dummy.assign_next_dependency_ticket()};
  MyBadSizeAllocatorPort bad_size_allocator{
      &dummy, OutputPortIndex(1),
      dummy.assign_next_dependency_ticket()};
  MyNullAllocatorPort null_allocator{&dummy, OutputPortIndex(2),
                                     dummy.assign_next_dependency_ticket()};

  // Wrong-type and wrong-size checks are Debug-build-only ("IF_ARMED").
  DRAKE_EXPECT_THROWS_MESSAGE_IF_ARMED(
      string_allocator.Allocate(),
      "OutputPort::Allocate().*expected BasicVector.*but got.*std::string"
      ".*OutputPort\\[0\\].*");
  DRAKE_EXPECT_THROWS_MESSAGE_IF_ARMED(
      bad_size_allocator.Allocate(),
      "OutputPort::Allocate().*expected vector.*size 2.*but got.*size 3"
      ".*OutputPort\\[1\\].*");

  // Nullptr check is unconditional.
  DRAKE_EXPECT_THROWS_MESSAGE(
      null_allocator.Allocate(),
      "OutputPort::Allocate().*nullptr.*OutputPort\\[2\\].*");
}
// This message can be caused by user action.
GTEST_TEST(TestBaseClass, BadOutputType) {
  DummySystem dummy;
  MyOutputPort port{&dummy, OutputPortIndex(0),
                    dummy.assign_next_dependency_ticket()};
  auto context = dummy.AllocateContext();
  // A correctly-typed value (from the port's own allocator) and a value of
  // the wrong type (a string).
  auto good_port_value = port.Allocate();
  auto bad_port_value = AbstractValue::Make<std::string>("hi there");

  DRAKE_EXPECT_NO_THROW(port.Calc(*context, good_port_value.get()));

  // This message is thrown in Debug. In Release some other error may trigger
  // but not from OutputPort, so we can't use
  // DRAKE_EXPECT_THROWS_MESSAGE_IF_ARMED() which would insist that if any
  // message is thrown in Release it must be the expected one.
  if (kDrakeAssertIsArmed) {
    DRAKE_EXPECT_THROWS_MESSAGE(
        port.Calc(*context, bad_port_value.get()),
        "OutputPort::Calc().*expected.*MyVector.*but got.*std::string"
        ".*OutputPort\\[0\\].*");
  }
}
// These functions match the signatures required by LeafOutputPort.
// AllocCallback that returns a string in an AbstractValue.
unique_ptr<AbstractValue> alloc_string() {
  // Wrap the string directly in a Value<string>.
  return make_unique<Value<string>>("from alloc_string");
}
// AllocCallback that returns a MyVector3d(-1,-2,-3), wrapped in a
// Value<BasicVector>.
unique_ptr<AbstractValue> alloc_myvector3() {
  // Store the concrete MyVector3d directly in a Value<BasicVector>; the
  // concrete type is preserved under the BasicVector interface.
  return make_unique<Value<BasicVector<double>>>(
      MyVector3d::Make(-1., -2., -3.));
}
// CalcCallback that expects to have a string to write on.
void calc_string(const ContextBase&, AbstractValue* value) {
  ASSERT_NE(value, nullptr);
  // Overwrite the stored string in place.
  value->get_mutable_value<string>() = "from calc_string";
}
// CalcVectorCallback sets the 3-element output vector to 99,100,101.
void calc_vector3(const ContextBase&, AbstractValue* value) {
ASSERT_NE(value, nullptr);
auto& vec = value->template get_mutable_value<BasicVector<double>>();
EXPECT_EQ(vec.size(), 3);
vec.set_value(Vector3d(99., 100., 101.));
}
// This class creates some isolated ports we can play with. They are not
// actually part of a System. There are lots of tests of Systems that have
// output ports elsewhere; that's not what we're trying to test here.
class LeafOutputPortTest : public ::testing::Test {
 protected:
  // Create abstract- and vector-valued ports.
  // Note: member declaration order matters here; each port initializer uses
  // dummy_ and consumes a port index and dependency ticket in sequence.
  DummySystem dummy_;

  // Abstract-valued port backed by a cache entry using the string
  // allocator/calculator helpers above.
  // TODO(sherm1) Use implicit_cast when available (from abseil).
  std::unique_ptr<LeafOutputPort<double>> absport_general_ptr_ =
      internal::FrameworkFactory::Make<LeafOutputPort<double>>(
          &dummy_,  // implicit_cast<const System<T>*>(&dummy_)
          &dummy_,  // implicit_cast<SystemBase*>(&dummy_)
          dummy_.get_system_id(),
          "absport",
          OutputPortIndex(dummy_.num_output_ports()),
          dummy_.assign_next_dependency_ticket(), kAbstractValued, 0 /* size */,
          &dummy_.DeclareCacheEntry(
              "absport", ValueProducer(alloc_string, calc_string)));
  LeafOutputPort<double>& absport_general_ = *absport_general_ptr_;

  // Vector-valued (size 3) port backed by a cache entry using the MyVector3d
  // allocator/calculator helpers above.
  std::unique_ptr<LeafOutputPort<double>> vecport_general_ptr_ =
      internal::FrameworkFactory::Make<LeafOutputPort<double>>(
          &dummy_,  // implicit_cast<const System<T>*>(&dummy_)
          &dummy_,  // implicit_cast<SystemBase*>(&dummy_)
          dummy_.get_system_id(),
          "vecport",
          OutputPortIndex(dummy_.num_output_ports()),
          dummy_.assign_next_dependency_ticket(), kVectorValued, 3 /* size */,
          &dummy_.DeclareCacheEntry(
              "vecport", ValueProducer(alloc_myvector3, calc_vector3)));
  LeafOutputPort<double>& vecport_general_ = *vecport_general_ptr_;

  // Context created after both ports are declared so it includes their
  // cache entries.
  unique_ptr<Context<double>> context_{dummy_.CreateDefaultContext()};
};
// Helper function for testing an abstract-valued port.
void AbstractPortCheck(const Context<double>& context,
const LeafOutputPort<double>& port,
string alloc_string) {
unique_ptr<AbstractValue> val = port.Allocate();
EXPECT_EQ(val->get_value<string>(), alloc_string);
port.Calc(context, val.get());
const string new_value("from calc_string");
EXPECT_EQ(val->get_value<string>(), new_value);
EXPECT_EQ(port.Eval<string>(context), new_value);
EXPECT_EQ(port.Eval<AbstractValue>(context).get_value<string>(),
new_value);
// Can't Eval into the wrong type.
DRAKE_EXPECT_THROWS_MESSAGE(
port.Eval<int>(context),
"OutputPort::Eval().*wrong value type int.*actual type.*std::string.*");
}
TEST_F(LeafOutputPortTest, DisableByDefaultWorks) {
  // Disable caching for the abstract port only; the vector port is a control.
  absport_general_.disable_caching_by_default();
  const CacheEntry& abs_entry = absport_general_.cache_entry();
  const CacheEntry& vec_entry = vecport_general_.cache_entry();
  EXPECT_TRUE(abs_entry.is_disabled_by_default());
  EXPECT_FALSE(vec_entry.is_disabled_by_default());

  // Find the cache entry values in a created context and verify that
  // the right one is disabled. (A fresh context is needed since the flag
  // applies at context-creation time.)
  auto my_context = dummy_.CreateDefaultContext();
  const CacheEntryValue& abs_value =
      my_context->get_cache().get_cache_entry_value(abs_entry.cache_index());
  const CacheEntryValue& vec_value =
      my_context->get_cache().get_cache_entry_value(vec_entry.cache_index());
  EXPECT_TRUE(abs_value.is_cache_entry_disabled());
  EXPECT_FALSE(vec_value.is_cache_entry_disabled());
}
// Check for proper construction and functioning of abstract LeafOutputPorts.
TEST_F(LeafOutputPortTest, AbstractPorts) {
  // The abstract port's explicit function allocator produces this string.
  const string expected_initial("from alloc_string");
  AbstractPortCheck(*context_, absport_general_, expected_initial);
}
// Helper function for testing a vector-valued port.
// Helper function for testing a vector-valued port. Verifies allocation,
// Calc(), and the several Eval() value-type choices.
void VectorPortCheck(const Context<double>& context,
                     const LeafOutputPort<double>& port,
                     Vector3d alloc_value) {
  // Treat the vector-valued port as a BasicVector, which
  // should have MyVector3d as concrete type.
  unique_ptr<AbstractValue> val = port.Allocate();
  auto& basic = val->template get_mutable_value<BasicVector<double>>();
  // The dynamic_cast verifies the concrete type survived allocation.
  MyVector3d& myvector3 = dynamic_cast<MyVector3d&>(basic);
  EXPECT_EQ(basic.get_value(), alloc_value);
  EXPECT_EQ(myvector3.get_value(), alloc_value);
  port.Calc(context, val.get());
  // Should have written into the underlying MyVector3d.
  const Vector3d new_value(99., 100., 101.);
  EXPECT_EQ(myvector3.get_value(), new_value);

  // Check that Eval is correct, for many ValueType choices.
  const auto& eval_eigen = port.Eval(context);
  const BasicVector<double>& eval_basic =
      port.Eval<BasicVector<double>>(context);
  const MyVector3d& eval_myvec3 = port.Eval<MyVector3d>(context);
  const AbstractValue& eval_abs = port.Eval<AbstractValue>(context);
  EXPECT_EQ(eval_eigen, new_value);
  EXPECT_EQ(eval_basic.CopyToVector(), new_value);
  EXPECT_EQ(eval_myvec3.CopyToVector(), new_value);
  EXPECT_EQ(
      eval_abs.get_value<BasicVector<double>>().CopyToVector(),
      new_value);
}
// Check for proper construction and functioning of vector-valued
// LeafOutputPorts.
TEST_F(LeafOutputPortTest, VectorPorts) {
  // The vector port's explicit function allocator produces these values.
  const Vector3d expected_initial(-1., -2., -3.);
  VectorPortCheck(*context_, vecport_general_, expected_initial);
}
// AllocCallback that returns an illegal null value.
unique_ptr<AbstractValue> alloc_null() {
  // An empty unique_ptr is the illegal null return.
  return unique_ptr<AbstractValue>{};
}
// The null check is done in all builds.
TEST_F(LeafOutputPortTest, ThrowIfNullAlloc) {
  // Create an abstract port with an allocator that returns null.
  // TODO(sherm1) Use implicit_cast when available (from abseil).
  auto null_port = internal::FrameworkFactory::Make<LeafOutputPort<double>>(
      &dummy_,  // implicit_cast<const System<T>*>(&dummy_)
      &dummy_,  // implicit_cast<SystemBase*>(&dummy_),
      dummy_.get_system_id(),
      "null_port",
      OutputPortIndex(dummy_.num_output_ports()),
      dummy_.assign_next_dependency_ticket(),
      kAbstractValued, 0 /* size */,
      &dummy_.DeclareCacheEntry(
          "null", ValueProducer(alloc_null, calc_string)));

  // Creating a context for this system should fail when it tries to allocate
  // a cache entry for null_port.
  EXPECT_THROW(dummy_.CreateDefaultContext(), std::logic_error);
}
// Check that Debug builds catch bad output types. We can't run these tests
// unchecked since the results would be indeterminate -- they may run to
// completion or segfault depending on memory contents.
TEST_F(LeafOutputPortTest, ThrowIfBadCalcOutput) {
  // These checks are Debug-only; in Release the behavior is indeterminate
  // so we skip the test entirely.
  if (kDrakeAssertIsDisarmed) {
    return;
  }

  // The abstract port is a string; let's give it an int.
  auto good_out = absport_general_.Allocate();
  auto bad_out = AbstractValue::Make<int>(5);
  DRAKE_EXPECT_NO_THROW(absport_general_.Calc(*context_, good_out.get()));
  DRAKE_EXPECT_THROWS_MESSAGE(
      absport_general_.Calc(*context_, bad_out.get()),
      "OutputPort::Calc().*expected.*std::string.*got.*int.*");

  // The vector port is a MyVector3d, we'll give it a BasicVector.
  auto good_vec = vecport_general_.Allocate();
  auto bad_vec = AbstractValue::Make(BasicVector<double>(2));
  DRAKE_EXPECT_NO_THROW(vecport_general_.Calc(*context_, good_vec.get()));
  DRAKE_EXPECT_THROWS_MESSAGE(
      vecport_general_.Calc(*context_, bad_vec.get()),
      "OutputPort::Calc().*expected.*MyVector.*got.*BasicVector.*");
}
// For testing diagram output ports we need a couple of subsystems that have
// recognizably different Contexts so we can verify that (1) the diagram exports
// the correct ports, and (2) the diagram passes down the right subcontext. Here
// we just vary the number of continuous state variables which lets us check
// both things. We also need to test that nested diagrams manage to export
// already-exported ports correctly.
class SystemWithNStates : public LeafSystem<double> {
 public:
  // Declares `num_states` continuous states plus one abstract output port
  // that reports the state count.
  explicit SystemWithNStates(int num_states) {
    DeclareContinuousState(num_states);
    DeclareAbstractOutputPort(
        kUseDefaultName, &SystemWithNStates::ReturnNumContinuous);
  }
  // Defaulted rather than an empty-brace body
  // (clang-tidy: modernize-use-equals-default).
  ~SystemWithNStates() override = default;

 private:
  // Output calculator: writes the context's continuous-state count to *nc.
  void ReturnNumContinuous(const Context<double>& context, int* nc) const {
    ASSERT_NE(nc, nullptr);
    *nc = context.num_continuous_states();
  }
};
// A one-level diagram.
class MyDiagram : public Diagram<double> {
 public:
  MyDiagram() {
    DiagramBuilder<double> builder;
    // Two subsystems with recognizably different state counts, whose
    // output ports are exported in order.
    SystemWithNStates* one_state = builder.AddSystem<SystemWithNStates>(1);
    SystemWithNStates* two_states = builder.AddSystem<SystemWithNStates>(2);
    builder.ExportOutput(one_state->get_output_port(0));
    builder.ExportOutput(two_states->get_output_port(0));
    builder.BuildInto(this);
  }
};
// A two-level nested diagram.
class MyNestedDiagram : public Diagram<double> {
 public:
  MyNestedDiagram() {
    DiagramBuilder<double> builder;
    SystemWithNStates* leaf_system = builder.AddSystem<SystemWithNStates>(3);
    MyDiagram* inner_diagram = builder.AddSystem<MyDiagram>();
    // Export the leaf port first so the nested diagram's ports must be
    // renumbered (leaf: 3 states; inner diagram: 1 state, then 2 states).
    builder.ExportOutput(leaf_system->get_output_port(0));
    builder.ExportOutput(inner_diagram->get_output_port(0));
    builder.ExportOutput(inner_diagram->get_output_port(1));
    builder.BuildInto(this);
  }
};
GTEST_TEST(DiagramOutputPortTest, OneLevel) {
  MyDiagram diagram;
  auto context = diagram.CreateDefaultContext();
  auto& out0 = diagram.get_output_port(0);
  auto& out1 = diagram.get_output_port(1);
  auto value0 = out0.Allocate();  // unique_ptr<AbstractValue>
  auto value1 = out1.Allocate();
  // Hold pointers into the allocated values so we can observe Calc() writes.
  const int* int0{};
  const int* int1{};
  DRAKE_EXPECT_NO_THROW(int0 = &value0->get_value<int>());
  DRAKE_EXPECT_NO_THROW(int1 = &value1->get_value<int>());
  EXPECT_EQ(*int0, 0);  // Default value initialized.
  EXPECT_EQ(*int1, 0);
  out0.Calc(*context, value0.get());
  out1.Calc(*context, value1.get());
  EXPECT_EQ(*int0, 1);  // Make sure we got the right Context.
  EXPECT_EQ(*int1, 2);

  // When given an inapproprate context, we fail-fast.
  // (Here: a subsystem context rather than the diagram's own context.)
  const auto& diagram_context = dynamic_cast<DiagramContext<double>&>(*context);
  const auto& sys1_context = diagram_context.GetSubsystemContext(
      SubsystemIndex{0});
  DRAKE_EXPECT_THROWS_MESSAGE(
      out0.Eval<int>(sys1_context),
      ".*Context.*was not created for this OutputPort.*");
}
GTEST_TEST(DiagramOutputPortTest, Nested) {
  MyNestedDiagram diagram;
  auto context = diagram.CreateDefaultContext();
  auto& out0 = diagram.get_output_port(0);
  auto& out1 = diagram.get_output_port(1);
  auto& out2 = diagram.get_output_port(2);
  auto value0 = out0.Allocate();  // unique_ptr<AbstractValue>
  auto value1 = out1.Allocate();
  auto value2 = out2.Allocate();
  // Hold pointers into the allocated values so we can observe Calc() writes.
  const int* int0{};
  const int* int1{};
  const int* int2{};
  DRAKE_EXPECT_NO_THROW(int0 = &value0->get_value<int>());
  DRAKE_EXPECT_NO_THROW(int1 = &value1->get_value<int>());
  DRAKE_EXPECT_NO_THROW(int2 = &value2->get_value<int>());
  EXPECT_EQ(*int0, 0);  // Default value initialized.
  EXPECT_EQ(*int1, 0);
  EXPECT_EQ(*int2, 0);
  out0.Calc(*context, value0.get());
  out1.Calc(*context, value1.get());
  out2.Calc(*context, value2.get());
  // Port ordering set up in MyNestedDiagram: leaf (3 states) first, then
  // the nested diagram's two ports (1 and 2 states).
  EXPECT_EQ(*int0, 3);  // Make sure we got the right Context.
  EXPECT_EQ(*int1, 1);
  EXPECT_EQ(*int2, 2);
}
} // namespace
} // namespace systems
} // namespace drake
| 0 |
/home/johnshepherd/drake/systems/framework | /home/johnshepherd/drake/systems/framework/test/dependency_tracker_test.cc | #include "drake/systems/framework/dependency_tracker.h"
#include <cmath>
#include <functional>
#include <memory>
#include <string>
#include <vector>
#include "gtest/gtest.h"
#include "drake/common/test_utilities/expect_no_throw.h"
#include "drake/systems/framework/context_base.h"
// Tests the DependencyTracker and DependencyGraph classes. These are intimately
// tied to the CacheEntryValue class in order to be able to invalidate
// cache entries with inline code for speed. We're not testing CacheEntryValue
// here but use it to check that DependencyTracker propagates invalidations
// to cache entries correctly.
//
// Although DependencyTracker code has no direct dependence on higher-level
// Context code, for this test we do require ContextBase so we can use the
// DependencyGraph and Cache objects it contains, and so we can test the cloning
// operation which requires several steps that ContextBase understands how to
// exercise (cloning of a DependencyGraph is always initiated as part of
// cloning a Context).
namespace drake {
namespace systems {
namespace {
// See above for why this is here.
// Minimal concrete ContextBase so the tests can create a real
// DependencyGraph and Cache and exercise cloning.
class MyContextBase final : public ContextBase {
 public:
  // Use a defaulted constructor rather than an empty-brace body
  // (clang-tidy: modernize-use-equals-default).
  MyContextBase() = default;
  MyContextBase(const MyContextBase&) = default;

 private:
  // Clone via the copy constructor; pointer fixup is handled by ContextBase.
  std::unique_ptr<ContextBase> DoCloneWithoutPointers() const final {
    return std::make_unique<MyContextBase>(*this);
  }
};
// For testing that trackers did what we expected.
struct Stats {
  int64_t ignored{0};        // Expected num_ignored_notifications().
  int64_t sent{0};           // Expected num_notifications_sent().
  int64_t value_change{0};   // Expected num_value_change_events().
  int64_t prereq_change{0};  // Expected num_prerequisite_change_events().
};
void ExpectStatsMatch(const DependencyTracker* tracker, const Stats& expected) {
EXPECT_EQ(tracker->num_ignored_notifications(), expected.ignored)
<< tracker->description();
EXPECT_EQ(tracker->num_notifications_sent(), expected.sent)
<< tracker->description();
EXPECT_EQ(tracker->num_value_change_events(), expected.value_change)
<< tracker->description();
EXPECT_EQ(tracker->num_prerequisite_change_events(), expected.prereq_change)
<< tracker->description();
EXPECT_EQ(tracker->num_notifications_received(),
tracker->num_value_change_events() +
tracker->num_prerequisite_change_events())
<< tracker->description();
}
// Test that the built-in trackers exist and are wired up correctly. See
// framework_common.h for the built-in tracker ticket numbers and make sure
// they are all tested here. See ContextBase::CreateBuiltInTrackers() to see
// how they are supposed to be wired up.
// (User-friendly access to tickets is provided by SystemBase methods; we have
// to construct them manually here.)
GTEST_TEST(DependencyTracker, BuiltInTrackers) {
  MyContextBase context, context2;

  // Make sure each tracker knows its own ticket and looks reasonable. This is
  // also a unit test for ThrowIfBadDependencyTracker().
  for (int ticket_int = 0; ticket_int < internal::kNextAvailableTicket;
       ++ticket_int) {
    const DependencyTicket ticket(ticket_int);
    ASSERT_TRUE(context.get_dependency_graph().has_tracker(ticket));
    auto& tracker = context.get_tracker(ticket);
    EXPECT_EQ(tracker.ticket(), ticket);
    const auto& context_interface =
        static_cast<const internal::ContextMessageInterface&>(context);
    DRAKE_EXPECT_NO_THROW(tracker.ThrowIfBadDependencyTracker(
        &context, &context_interface.dummy_cache_entry_value()));
    // A tracker owned by `context` must not validate against `context2`.
    EXPECT_THROW(tracker.ThrowIfBadDependencyTracker(&context2),
                 std::logic_error);
  }

  // Now check that each built-in tracker has the expected prerequisites and
  // dependents.
  using DT = DependencyTicket;  // Reduce clutter.
  auto& nothing = context.get_tracker(DT(internal::kNothingTicket));
  auto& time = context.get_tracker(DT(internal::kTimeTicket));
  auto& accuracy = context.get_tracker(DT(internal::kAccuracyTicket));
  auto& q = context.get_tracker(DT(internal::kQTicket));
  auto& v = context.get_tracker(DT(internal::kVTicket));
  auto& z = context.get_tracker(DT(internal::kZTicket));
  auto& xc = context.get_tracker(DT(internal::kXcTicket));
  auto& xd = context.get_tracker(DT(internal::kXdTicket));
  auto& xa = context.get_tracker(DT(internal::kXaTicket));
  auto& x = context.get_tracker(DT(internal::kXTicket));
  auto& pn = context.get_tracker(DT(internal::kPnTicket));
  auto& pa = context.get_tracker(DT(internal::kPaTicket));
  auto& p = context.get_tracker(DT(internal::kAllParametersTicket));
  auto& u = context.get_tracker(DT(internal::kAllInputPortsTicket));
  auto& all_non_u_sources =
      context.get_tracker(DT(internal::kAllSourcesExceptInputPortsTicket));
  auto& all_sources = context.get_tracker(DT(internal::kAllSourcesTicket));
  auto& configuration = context.get_tracker(DT(internal::kConfigurationTicket));
  auto& kinematics = context.get_tracker(DT(internal::kKinematicsTicket));
  auto& xc_dot = context.get_tracker(DT(internal::kXcdotTicket));
  auto& pe = context.get_tracker(DT(internal::kPeTicket));
  auto& ke = context.get_tracker(DT(internal::kKeTicket));
  auto& pc = context.get_tracker(DT(internal::kPcTicket));
  auto& pnc = context.get_tracker(DT(internal::kPncTicket));

  // "nothing" has no prerequisites or subscribers.
  EXPECT_EQ(nothing.prerequisites().size(), 0);
  EXPECT_EQ(nothing.subscribers().size(), 0);

  // time and accuracy are independent. all_sources depends on both,
  // configuration tracker depends on accuracy.
  EXPECT_EQ(time.prerequisites().size(), 0);
  ASSERT_EQ(time.subscribers().size(), 1);
  EXPECT_EQ(time.subscribers()[0], &all_non_u_sources);
  EXPECT_EQ(accuracy.prerequisites().size(), 0);
  ASSERT_EQ(accuracy.subscribers().size(), 2);
  EXPECT_EQ(accuracy.subscribers()[0], &all_non_u_sources);
  EXPECT_EQ(accuracy.subscribers()[1], &configuration);

  // q, v, z are independent but xc subscribes to all, configuration to
  // q and z, and kinematics to v.
  EXPECT_EQ(q.prerequisites().size(), 0);
  ASSERT_EQ(q.subscribers().size(), 2);
  EXPECT_EQ(q.subscribers()[0], &xc);
  EXPECT_EQ(q.subscribers()[1], &configuration);
  EXPECT_EQ(v.prerequisites().size(), 0);
  ASSERT_EQ(v.subscribers().size(), 2);
  EXPECT_EQ(v.subscribers()[0], &xc);
  EXPECT_EQ(v.subscribers()[1], &kinematics);
  EXPECT_EQ(z.prerequisites().size(), 0);
  ASSERT_EQ(z.subscribers().size(), 2);
  EXPECT_EQ(z.subscribers()[0], &xc);
  EXPECT_EQ(z.subscribers()[1], &configuration);

  // xc depends on q, v, and z and x subscribes.
  ASSERT_EQ(xc.prerequisites().size(), 3);
  EXPECT_EQ(xc.prerequisites()[0], &q);
  EXPECT_EQ(xc.prerequisites()[1], &v);
  EXPECT_EQ(xc.prerequisites()[2], &z);
  ASSERT_EQ(xc.subscribers().size(), 1);
  EXPECT_EQ(xc.subscribers()[0], &x);

  // No discrete variables yet so xd is independent; x, configuration
  // subscribes.
  EXPECT_EQ(xd.prerequisites().size(), 0);
  ASSERT_EQ(xd.subscribers().size(), 2);
  EXPECT_EQ(xd.subscribers()[0], &x);
  EXPECT_EQ(xd.subscribers()[1], &configuration);

  // No abstract variables yet so xa is independent; x, configuration
  // subscribes.
  EXPECT_EQ(xa.prerequisites().size(), 0);
  ASSERT_EQ(xa.subscribers().size(), 2);
  EXPECT_EQ(xa.subscribers()[0], &x);
  EXPECT_EQ(xa.subscribers()[1], &configuration);

  // x depends on xc, xd, and xa; all_sources subscribes.
  ASSERT_EQ(x.prerequisites().size(), 3);
  EXPECT_EQ(x.prerequisites()[0], &xc);
  EXPECT_EQ(x.prerequisites()[1], &xd);
  EXPECT_EQ(x.prerequisites()[2], &xa);
  ASSERT_EQ(x.subscribers().size(), 1);
  EXPECT_EQ(x.subscribers()[0], &all_non_u_sources);

  // Until #9171 is resolved, we don't know which states and parameters affect
  // configuration so we have to assume they all do (except v).
  // TODO(sherm1) Revise after #9171 is resolved.
  ASSERT_EQ(configuration.prerequisites().size(), 6);
  EXPECT_EQ(configuration.prerequisites()[0], &accuracy);
  EXPECT_EQ(configuration.prerequisites()[1], &q);
  EXPECT_EQ(configuration.prerequisites()[2], &z);
  EXPECT_EQ(configuration.prerequisites()[3], &xd);
  EXPECT_EQ(configuration.prerequisites()[4], &xa);
  EXPECT_EQ(configuration.prerequisites()[5], &p);
  ASSERT_EQ(configuration.subscribers().size(), 1);
  EXPECT_EQ(configuration.subscribers()[0], &kinematics);

  // kinematics depends on everything configuration depends on, plus v.
  // TODO(sherm1) Revise after #9171 is resolved.
  ASSERT_EQ(kinematics.prerequisites().size(), 2);
  EXPECT_EQ(kinematics.prerequisites()[0], &configuration);
  EXPECT_EQ(kinematics.prerequisites()[1], &v);
  EXPECT_EQ(kinematics.subscribers().size(), 0);

  // all_parameters tracker depends on the numeric and abstract parameter
  // trackers. all_sources, and configuration subscribe.
  EXPECT_EQ(p.prerequisites().size(), 2);
  EXPECT_EQ(p.prerequisites()[0], &pn);
  EXPECT_EQ(p.prerequisites()[1], &pa);
  ASSERT_EQ(p.subscribers().size(), 2);
  EXPECT_EQ(p.subscribers()[0], &all_non_u_sources);
  EXPECT_EQ(p.subscribers()[1], &configuration);

  // We don't have any specific input ports yet so u has no prerequisites. Only
  // all_sources subscribes.
  EXPECT_EQ(u.prerequisites().size(), 0);
  ASSERT_EQ(u.subscribers().size(), 1);
  EXPECT_EQ(u.subscribers()[0], &all_sources);

  // All sources except input ports depends on time, accuracy, x, p;
  // only all_sources subscribes.
  ASSERT_EQ(all_non_u_sources.prerequisites().size(), 4);
  EXPECT_EQ(all_non_u_sources.prerequisites()[0], &time);
  EXPECT_EQ(all_non_u_sources.prerequisites()[1], &accuracy);
  EXPECT_EQ(all_non_u_sources.prerequisites()[2], &x);
  EXPECT_EQ(all_non_u_sources.prerequisites()[3], &p);
  EXPECT_EQ(all_non_u_sources.subscribers().size(), 1);
  EXPECT_EQ(all_non_u_sources.subscribers()[0], &all_sources);

  // All sources depends on the non_u sources plus u; no subscribers yet.
  ASSERT_EQ(all_sources.prerequisites().size(), 2);
  EXPECT_EQ(all_sources.prerequisites()[0], &all_non_u_sources);
  EXPECT_EQ(all_sources.prerequisites()[1], &u);
  EXPECT_EQ(all_sources.subscribers().size(), 0);

  // Cache entry trackers are created during Context construction but are not
  // connected to the corresponding cache entry values until those are
  // allocated later by the system framework (in SystemBase).
  EXPECT_EQ(xc_dot.prerequisites().size(), 0);
  EXPECT_EQ(xc_dot.subscribers().size(), 0);
  EXPECT_EQ(pe.prerequisites().size(), 0);
  EXPECT_EQ(pe.subscribers().size(), 0);
  EXPECT_EQ(ke.prerequisites().size(), 0);
  EXPECT_EQ(ke.subscribers().size(), 0);
  EXPECT_EQ(pc.prerequisites().size(), 0);
  EXPECT_EQ(pc.subscribers().size(), 0);
  EXPECT_EQ(pnc.prerequisites().size(), 0);
  EXPECT_EQ(pnc.subscribers().size(), 0);
}
// Normally the dependency trackers are allocated automatically by the
// System framework. Here we try to use as little of the framework as possible
// and cobble together the following dependency graph by hand:
//
// +-----------+ +---------------+
// | upstream1 +--------+-----------> downstream1 |
// +-----------+ | +---> |
// | | +---------------+
// +-----------+ +----v----+ | +---------------+
// | upstream2 +---> middle1 +--+---> downstream2 +--+
// +-----------+ +----+----+ +---------------+ | +-----------+
// | +--> |
// +-----------+ +---------------------------------> entry0 |
// | time +------------------------------------------> |
// | +---> others +-----------+
// +-----------+
//
// entry0 is a cache entry so we expect invalidation; the others are just
// trackers with no associated values. Time is a built-in tracker and may
// have other subscribers besides what we added here.
class HandBuiltDependencies : public ::testing::Test {
 protected:
  // Builds the hand-crafted graph pictured above: two sources (upstream1,
  // upstream2), one intermediate (middle1), two sinks (downstream1,
  // downstream2), and one cache entry (entry0) that subscribes to time,
  // middle1, and downstream2.
  void SetUp() override {
    DependencyGraph& graph = context_.get_mutable_dependency_graph();
    // Tickets at or above kNextAvailableTicket are free for ad hoc trackers.
    DependencyTicket next_ticket(internal::kNextAvailableTicket);
    upstream1_ = &graph.CreateNewDependencyTracker(
        next_ticket++, "upstream1");
    upstream2_ = &graph.CreateNewDependencyTracker(
        next_ticket++, "upstream2");
    middle1_ = &graph.CreateNewDependencyTracker(
        next_ticket++, "middle1");
    downstream1_ = &graph.CreateNewDependencyTracker(
        next_ticket++, "downstream1");
    downstream2_ = &graph.CreateNewDependencyTracker(
        next_ticket++, "downstream2");
    // Wire up the interconnections shown in the diagram above.
    middle1_->SubscribeToPrerequisite(upstream1_);
    middle1_->SubscribeToPrerequisite(upstream2_);
    downstream1_->SubscribeToPrerequisite(middle1_);
    downstream1_->SubscribeToPrerequisite(upstream1_);
    downstream2_->SubscribeToPrerequisite(middle1_);
    // Creating the cache entry value also creates its dependency tracker and
    // subscribes that tracker to the listed prerequisites.
    Cache& cache = context_.get_mutable_cache();
    const CacheIndex index(cache.cache_size());
    entry0_ = &cache.CreateNewCacheEntryValue(
        index, next_ticket++, "entry0",
        {time_ticket_, middle1_->ticket(), downstream2_->ticket()}, &graph);
    entry0_->SetInitialValue(AbstractValue::Make<int>(3));
    // A new tracker should have been created.
    DRAKE_EXPECT_NO_THROW(entry0_tracker_ =
                              &graph.get_mutable_tracker(entry0_->ticket()));
    // Retrieve time tracker.
    time_tracker_ = &graph.get_mutable_tracker(time_ticket_);
  }

  // Compares every tracker's notification statistics against the expected
  // counts accumulated in the Stats members below.
  void ExpectAllStatsMatch() const {
    ExpectStatsMatch(time_tracker_, tt_stats_);
    ExpectStatsMatch(upstream1_, up1_stats_);
    ExpectStatsMatch(upstream2_, up2_stats_);
    ExpectStatsMatch(middle1_, mid1_stats_);
    ExpectStatsMatch(downstream1_, down1_stats_);
    ExpectStatsMatch(downstream2_, down2_stats_);
    ExpectStatsMatch(entry0_tracker_, entry0_stats_);
  }

  // The context under test; the reference is just a convenient alias.
  std::unique_ptr<MyContextBase> context_ptr_ =
      std::make_unique<MyContextBase>();
  ContextBase& context_ = *context_ptr_;

  // Raw pointers into the graph owned by context_; set in SetUp().
  DependencyTracker* middle1_{};
  DependencyTracker* upstream1_{};
  DependencyTracker* upstream2_{};
  DependencyTracker* downstream1_{};
  DependencyTracker* downstream2_{};
  CacheEntryValue* entry0_{};
  DependencyTracker* entry0_tracker_{};
  const DependencyTicket time_ticket_{internal::kTimeTicket};
  DependencyTracker* time_tracker_{};

  // Expected statistics for each of the above trackers; initially zero.
  Stats tt_stats_, up1_stats_, up2_stats_, mid1_stats_, down1_stats_,
      down2_stats_, entry0_stats_;
};
// Check that we can ask the graph for new dependency trackers, and that the
// associated cache entry has the right value.
TEST_F(HandBuiltDependencies, Construction) {
  DependencyGraph& mutable_graph = context_.get_mutable_dependency_graph();

  // A tracker created with an explicit ticket must use exactly that ticket.
  auto& known_ticket_tracker = mutable_graph.CreateNewDependencyTracker(
      DependencyTicket(100), "tracker100");
  EXPECT_EQ(known_ticket_tracker.ticket(), 100);

  // When no ticket is supplied, the next unused one should be assigned.
  const int tracker_count = mutable_graph.trackers_size();
  auto& auto_ticket_tracker =
      mutable_graph.CreateNewDependencyTracker("tracker");
  EXPECT_EQ(auto_ticket_tracker.ticket(), tracker_count);

  // Neither new tracker has a cache entry attached; cache_entry_value()
  // should report that with a null pointer.
  EXPECT_EQ(known_ticket_tracker.cache_entry_value(), nullptr);
  EXPECT_EQ(auto_ticket_tracker.cache_entry_value(), nullptr);

  // SetUp() attached entry0_ to entry0_tracker_; verify the association.
  EXPECT_EQ(entry0_tracker_->cache_entry_value(), entry0_);

  // A cache entry pointer can be attached after the fact. (Reusing entry0_
  // is harmless; we only care that the pointer gets memorized properly.)
  auto_ticket_tracker.set_cache_entry_value(entry0_);
  EXPECT_EQ(auto_ticket_tracker.cache_entry_value(), entry0_);

  // Creating a cache entry value for a ticket whose built-in tracker already
  // exists must reuse that tracker rather than make a new one. The xcdot
  // entry is a real-world case that requires this behavior.
  Cache& mutable_cache = context_.get_mutable_cache();
  const CacheIndex next_index(mutable_cache.cache_size());
  const DependencyTicket builtin_ticket(internal::kXcdotTicket);
  const DependencyTracker& builtin_tracker(
      mutable_graph.get_tracker(builtin_ticket));
  const CacheEntryValue& new_value = mutable_cache.CreateNewCacheEntryValue(
      next_index, builtin_ticket, "xcdot cache value",
      {time_ticket_}, &mutable_graph);
  EXPECT_EQ(new_value.ticket(), builtin_ticket);
  EXPECT_EQ(builtin_tracker.cache_entry_value(), &new_value);
}
// Verify that a tracker renders a human-readable path description of the
// form "<system pathname>:<tracker description>".
TEST_F(HandBuiltDependencies, GetPathname) {
  const std::string expected =
      context_.GetSystemPathname() + ":" + middle1_->description();
  EXPECT_EQ(middle1_->GetPathDescription(), expected);
}
// Verify that a previously-established prerequisite subscription can be
// removed while leaving other subscriptions intact.
TEST_F(HandBuiltDependencies, Unsubscribe) {
  DependencyTracker& mid = *middle1_;
  // Both subscriptions established in SetUp() are present initially.
  EXPECT_TRUE(mid.HasPrerequisite(*upstream1_));
  EXPECT_TRUE(mid.HasPrerequisite(*upstream2_));
  // Dropping one prerequisite must not disturb the other.
  mid.UnsubscribeFromPrerequisite(upstream1_);
  EXPECT_FALSE(mid.HasPrerequisite(*upstream1_));
  EXPECT_TRUE(mid.HasPrerequisite(*upstream2_));
}
// Check that notifications and invalidation are propagated correctly, and that
// short-circuiting keeps the number of notifications minimal when there are
// multiple paths through the graph.
TEST_F(HandBuiltDependencies, Notify) {
  // Just-allocated cache entries are not up to date. We're not using the
  // cache entry API here -- just playing with the underlying "up to date" flag.
  EXPECT_TRUE(entry0_->is_out_of_date());

  // set_value() marks the entry up to date.
  entry0_->set_value(1125);
  EXPECT_FALSE(entry0_->is_out_of_date());

  // Refer to diagram above to decipher the expected stats below.

  // Nobody should have been notified yet.
  ExpectAllStatsMatch();

  // The cache entry does not depend on downstream1.
  downstream1_->NoteValueChange(1LL);
  down1_stats_.value_change++;  // No dependents.
  EXPECT_FALSE(entry0_->is_out_of_date());
  ExpectAllStatsMatch();

  // A repeated notification (same change event) should be ignored.
  downstream1_->NoteValueChange(1LL);
  down1_stats_.value_change++;
  down1_stats_.ignored++;
  ExpectAllStatsMatch();

  // The cache entry depends directly on time.
  time_tracker_->NoteValueChange(2LL);
  tt_stats_.value_change++;
  tt_stats_.sent += time_tracker_->num_subscribers();  // entry0, others
  entry0_stats_.prereq_change++;
  EXPECT_TRUE(entry0_->is_out_of_date());
  ExpectAllStatsMatch();

  entry0_->mark_up_to_date();
  EXPECT_FALSE(entry0_->is_out_of_date());

  // upstream1 fans out to middle1 and downstream1; middle1 then notifies
  // downstream1 (again, so the duplicate is ignored), downstream2, and entry0.
  // entry0 also hears from downstream2 and ignores that duplicate (all within
  // the same change event 3).
  upstream1_->NoteValueChange(3LL);
  up1_stats_.value_change++;
  up1_stats_.sent += 2;  // mid1, down1
  mid1_stats_.prereq_change++;
  mid1_stats_.sent += 3;  // down1, down2, entry0
  down1_stats_.prereq_change += 2;
  down1_stats_.ignored++;
  down2_stats_.prereq_change++;
  down2_stats_.sent++;  // entry0
  entry0_stats_.prereq_change += 2;
  entry0_stats_.ignored++;
  EXPECT_TRUE(entry0_->is_out_of_date());
  ExpectAllStatsMatch();
}
// Clone the dependency graph and make sure the clone works like the
// original did, but on the new entities!
TEST_F(HandBuiltDependencies, Clone) {
  DependencyGraph& graph = context_.get_mutable_dependency_graph();

  // Make up a ticket number that is guaranteed to leave a gap to make sure
  // we test handling of missing trackers.
  const DependencyTicket after_gap_ticket(graph.trackers_size() + 3);
  const DependencyTracker& after_gap = graph.CreateNewDependencyTracker(
      after_gap_ticket, "after_gap");

  // Do some notifies in the old context so we can make sure all the stats
  // get cleared for the clone. (Previous test ensured these are propagated.)
  upstream1_->NoteValueChange(5LL);
  upstream2_->NoteValueChange(6LL);
  time_tracker_->NoteValueChange(7LL);

  // Create a clone of the dependency graph and exercise the pointer fixup code.
  auto clone_context = context_.Clone();

  // Check a tracker that is known NOT to be a cache entry tracker to
  // ensure that it is referencing the dummy CacheEntryValue in the cloned
  // context, not the original one.

  // Verify that each Context has a unique dummy CacheEntryValue.
  const auto& context_interface =
      static_cast<const internal::ContextMessageInterface&>(context_);
  const auto& clone_context_interface =
      static_cast<const internal::ContextMessageInterface&>(*clone_context);
  ASSERT_NE(&context_interface.dummy_cache_entry_value(),
            &clone_context_interface.dummy_cache_entry_value());

  // Now verify that the trackers are associated with the right one.
  const DependencyTracker& original_time_tracker =
      context_.get_tracker(time_ticket_);
  const DependencyTracker& clone_time_tracker =
      clone_context->get_tracker(time_ticket_);
  DRAKE_EXPECT_NO_THROW(original_time_tracker.ThrowIfBadDependencyTracker(
      &context_, &context_interface.dummy_cache_entry_value()));
  DRAKE_EXPECT_NO_THROW(clone_time_tracker.ThrowIfBadDependencyTracker(
      clone_context.get(), &clone_context_interface.dummy_cache_entry_value()));

  // Now study the cloned graph to see if it got fixed up correctly.
  DependencyGraph& clone_graph = clone_context->get_mutable_dependency_graph();

  EXPECT_EQ(clone_graph.trackers_size(), graph.trackers_size());
  // Every tracker present in the original graph must appear in the clone with
  // identical wiring (same tickets and descriptions) but distinct objects.
  for (DependencyTicket ticket(0); ticket < graph.trackers_size(); ++ticket) {
    EXPECT_EQ(graph.has_tracker(ticket), clone_graph.has_tracker(ticket));
    if (!graph.has_tracker(ticket)) continue;

    const auto& tracker = graph.get_tracker(ticket);
    const auto& clone_tracker = clone_graph.get_tracker(ticket);
    EXPECT_NE(&clone_tracker, &tracker);
    EXPECT_EQ(clone_tracker.description(), tracker.description());
    EXPECT_EQ(clone_tracker.num_subscribers(), tracker.num_subscribers());
    EXPECT_EQ(clone_tracker.num_prerequisites(), tracker.num_prerequisites());
    for (int i = 0; i < tracker.num_subscribers(); ++i) {
      const DependencyTracker* clone_subs = clone_tracker.subscribers()[i];
      const DependencyTracker* subs = tracker.subscribers()[i];
      EXPECT_NE(clone_subs, nullptr);
      EXPECT_NE(clone_subs, subs);  // Pointers were fixed up, not copied.
      EXPECT_EQ(clone_subs->ticket(), subs->ticket());
      EXPECT_EQ(clone_subs->description(), subs->description());
    }
    for (int i = 0; i < tracker.num_prerequisites(); ++i) {
      const DependencyTracker* clone_pre = clone_tracker.prerequisites()[i];
      const DependencyTracker* pre = tracker.prerequisites()[i];
      EXPECT_NE(clone_pre, nullptr);
      EXPECT_NE(clone_pre, pre);  // Pointers were fixed up, not copied.
      EXPECT_EQ(clone_pre->ticket(), pre->ticket());
      EXPECT_EQ(clone_pre->description(), pre->description());
    }
  }

  // Dig up corresponding trackers & the cloned cache entry. The auto here
  // is "DependencyTracker".
  auto& time1 = clone_graph.get_mutable_tracker(time_ticket_);
  auto& up1 = clone_graph.get_mutable_tracker(upstream1_->ticket());
  auto& up2 = clone_graph.get_mutable_tracker(upstream2_->ticket());
  auto& mid1 = clone_graph.get_mutable_tracker(middle1_->ticket());
  auto& down1 = clone_graph.get_mutable_tracker(downstream1_->ticket());
  auto& down2 = clone_graph.get_mutable_tracker(downstream2_->ticket());
  auto& e0_tracker = clone_graph.get_mutable_tracker(entry0_->ticket());
  auto& after_gap_clone = clone_graph.get_tracker(after_gap.ticket());

  // Check that gaps in the tracker ticket numbering were preserved.
  EXPECT_EQ(after_gap_clone.ticket(), after_gap_ticket);
  EXPECT_FALSE(clone_graph.has_tracker(DependencyTicket(after_gap_ticket - 1)));

  // Find the cloned cache entry corresponding to entry0_.
  Cache& clone_cache = clone_context->get_mutable_cache();
  CacheEntryValue& clone_entry0 =
      clone_cache.get_mutable_cache_entry_value(entry0_->cache_index());

  // Expected statistics for the cloned trackers; initially zero.
  Stats tt_stats, up1_stats, up2_stats, mid1_stats, down1_stats,
      down2_stats, entry0_stats;

  // All stats should have been cleared in the clone.
  ExpectStatsMatch(&time1, tt_stats);
  ExpectStatsMatch(&up1, up1_stats);
  ExpectStatsMatch(&up2, up2_stats);
  ExpectStatsMatch(&mid1, mid1_stats);
  ExpectStatsMatch(&down1, down1_stats);
  ExpectStatsMatch(&down2, down2_stats);
  ExpectStatsMatch(&e0_tracker, entry0_stats);

  EXPECT_TRUE(clone_entry0.is_out_of_date());
  clone_entry0.set_value(101);
  EXPECT_FALSE(clone_entry0.is_out_of_date());

  // Upstream2 is prerequisite to middle1 which is prerequisite to down1,2 and
  // the cache entry, and down2 gets the cache entry again so should be
  // ignored.
  up2.NoteValueChange(1LL);
  up2_stats.value_change++;
  up2_stats.sent++;  // mid1
  mid1_stats.prereq_change++;
  mid1_stats.sent += 3;  // down1, down2, entry0
  down1_stats.prereq_change++;
  down2_stats.prereq_change++;
  down2_stats.sent++;
  entry0_stats.prereq_change += 2;
  entry0_stats.ignored++;
  ExpectStatsMatch(&time1, tt_stats);
  ExpectStatsMatch(&up1, up1_stats);
  ExpectStatsMatch(&up2, up2_stats);
  ExpectStatsMatch(&mid1, mid1_stats);
  ExpectStatsMatch(&down1, down1_stats);
  ExpectStatsMatch(&down2, down2_stats);
  ExpectStatsMatch(&e0_tracker, entry0_stats);
}
// Check that we can make a DependencyTracker suppress notifications to
// its subscribers, and that the suppress_notifications flag is copied
// when a Context is cloned.
TEST_F(HandBuiltDependencies, SuppressNotifications) {
  // Refer to diagram above to see the interconnections.
  EXPECT_FALSE(middle1_->notifications_are_suppressed());
  ExpectAllStatsMatch();  // Everything is zero here.

  // This should notify downstream1 and middle1. Then middle1
  // notifies downstream1 again, downstream2, and entry0.
  upstream1_->NoteValueChange(1LL);
  EXPECT_EQ(upstream1_->num_notifications_sent(), 2);
  EXPECT_EQ(middle1_->num_prerequisite_change_events(), 1);
  EXPECT_EQ(middle1_->num_ignored_notifications(), 0);
  EXPECT_EQ(middle1_->num_notifications_sent(), 3);
  EXPECT_EQ(downstream1_->num_prerequisite_change_events(), 2);
  EXPECT_EQ(downstream2_->num_prerequisite_change_events(), 1);
  EXPECT_EQ(downstream2_->num_notifications_sent(), 1);
  EXPECT_EQ(entry0_tracker_->num_prerequisite_change_events(), 2);

  // Now suppress notifications from middle1 and repeat the above. Make
  // sure no one downstream of middle1 gets notified.
  // (A suppressed tracker counts incoming notifications as "ignored".)
  middle1_->suppress_notifications();
  EXPECT_TRUE(middle1_->notifications_are_suppressed());
  upstream1_->NoteValueChange(2LL);
  EXPECT_EQ(upstream1_->num_notifications_sent(), 4);  // +2
  EXPECT_EQ(middle1_->num_prerequisite_change_events(), 2);  // +1
  EXPECT_EQ(middle1_->num_ignored_notifications(), 1);  // should ignore now
  EXPECT_EQ(middle1_->num_notifications_sent(), 3);  // +0
  EXPECT_EQ(downstream1_->num_prerequisite_change_events(), 3);  // +1
  EXPECT_EQ(downstream2_->num_prerequisite_change_events(), 1);  // +0
  EXPECT_EQ(downstream2_->num_notifications_sent(), 1);  // +0
  EXPECT_EQ(entry0_tracker_->num_prerequisite_change_events(), 2);  // +0

  // Check that the clone preserves the suppress_notifications flag.
  auto clone_context = context_.Clone();
  const DependencyTracker& clone_middle1_ =
      clone_context->get_dependency_graph().get_tracker(middle1_->ticket());
  EXPECT_TRUE(clone_middle1_.notifications_are_suppressed());

  // Check a random one to make sure they didn't all get suppressed!
  const DependencyTracker& clone_upstream1_ =
      clone_context->get_dependency_graph().get_tracker(upstream1_->ticket());
  EXPECT_FALSE(clone_upstream1_.notifications_are_suppressed());
}
} // namespace
} // namespace systems
} // namespace drake
| 0 |
/home/johnshepherd/drake/systems/framework | /home/johnshepherd/drake/systems/framework/test/single_output_vector_source_test.cc | #include "drake/systems/framework/single_output_vector_source.h"
#include <memory>
#include <Eigen/Dense>
#include <gtest/gtest.h>
#include "drake/common/drake_copyable.h"
#include "drake/systems/framework/test_utilities/scalar_conversion.h"
namespace drake {
namespace systems {
namespace {
// Fixed size of the test source's single output vector.
const int kSize = 3;
// A concrete source whose single output port (of size kSize) always reports
// a vector of all ones.
class TestSource : public SingleOutputVectorSource<double> {
 public:
  DRAKE_NO_COPY_NO_MOVE_NO_ASSIGN(TestSource)

  TestSource() : SingleOutputVectorSource<double>(kSize) {}

  // N.B. This method signature might be used by many downstream projects.
  // Change it only with good reason and with a deprecation period first.
  void DoCalcVectorOutput(
      const Context<double>& context,
      Eigen::VectorBlock<Eigen::VectorXd>* output) const final {
    // Fill every entry with 1.0 (equivalent to assigning Vector3d::Ones()).
    output->setOnes();
  }
};
class SingleOutputVectorSourceTest : public ::testing::Test {
 protected:
  // Creates the device under test and a default context for it.
  void SetUp() override {
    source_ = std::make_unique<TestSource>();
    context_ = source_->CreateDefaultContext();
  }

  // Held as a System<double> pointer so tests exercise the base interface.
  std::unique_ptr<System<double>> source_;
  std::unique_ptr<Context<double>> context_;
};
// The source has no inputs, and its single output port evaluates to ones.
TEST_F(SingleOutputVectorSourceTest, OutputTest) {
  ASSERT_EQ(context_->num_input_ports(), 0);
  const Eigen::VectorXd value = source_->get_output_port(0).Eval(*context_);
  EXPECT_EQ(value, Eigen::Vector3d::Ones());
}
// A SingleOutputVectorSource allocates no continuous state.
TEST_F(SingleOutputVectorSourceTest, IsStateless) {
  const Context<double>& context = *context_;
  EXPECT_EQ(context.num_continuous_states(), 0);
}
// Some tag types used to select which constructor gets called.
struct UseTransmogrify {};  // Selects a constructor taking a SystemTypeTag.
struct UseVector {};        // Selects a constructor taking a model vector.
// A source that supports scalar conversion when (and only when) constructed
// with a SystemTypeTag; exercises all four Base constructor overloads.
template <typename T>
class Convertable final : public SingleOutputVectorSource<T> {
 public:
  DRAKE_NO_COPY_NO_MOVE_NO_ASSIGN(Convertable)

  using Base = SingleOutputVectorSource<T>;
  using Tag = SystemTypeTag<Convertable>;

  // The first two constructors omit the Tag (no conversion support); the
  // latter two pass it (conversion supported).
  Convertable() : Base(kSize) {}
  explicit Convertable(UseVector) : Base(*MakeVec()) {}
  explicit Convertable(UseTransmogrify) : Base(Tag{}, kSize) {}
  Convertable(UseTransmogrify, UseVector) : Base(Tag{}, *MakeVec()) {}

  // Scalar-converting copy constructor.
  template <typename U>
  explicit Convertable(const Convertable<U>&)
      : Convertable<T>(UseTransmogrify{}) {}

 private:
  // Returns a freshly-allocated model vector of size kSize filled with 22.0.
  auto MakeVec() const {
    return std::make_unique<BasicVector<T>>(VectorX<T>::Constant(kSize, 22.0));
  }

  void DoCalcVectorOutput(
      const Context<T>& context,
      Eigen::VectorBlock<VectorX<T>>* output) const final {
    *output = Eigen::Vector3d::Ones();
  }
};
GTEST_TEST(SingleOutputVectorSourceConvertableTest, ScalarTypes) {
  // Systems built without a SystemScalarConverter refuse to convert.
  const Convertable<double> plain;
  const Convertable<double> plain_with_vector{UseVector{}};
  for (const auto* system : {&plain, &plain_with_vector}) {
    EXPECT_FALSE(is_autodiffxd_convertible(*system));
    EXPECT_FALSE(is_symbolic_convertible(*system));
  }

  // Systems built with a SystemScalarConverter convert to both scalar types.
  const Convertable<double> tagged{UseTransmogrify{}};
  const Convertable<double> tagged_with_vector{UseTransmogrify{}, UseVector{}};
  for (const auto* system : {&tagged, &tagged_with_vector}) {
    EXPECT_TRUE(is_autodiffxd_convertible(*system));
    EXPECT_TRUE(is_symbolic_convertible(*system));
  }
}
} // namespace
} // namespace systems
} // namespace drake
| 0 |
/home/johnshepherd/drake/systems/framework | /home/johnshepherd/drake/systems/framework/test/abstract_values_test.cc | #include "drake/systems/framework/abstract_values.h"
#include <memory>
#include <gtest/gtest.h>
#include "drake/systems/framework/test_utilities/pack_value.h"
namespace drake {
namespace systems {
namespace {
class AbstractStateTest : public ::testing::Test {
 public:
  // Seeds the test data with two integer AbstractValues: 42 and 76.
  AbstractStateTest() {
    data_.push_back(PackValue(42));
    data_.push_back(PackValue(76));
  }

 protected:
  std::vector<std::unique_ptr<AbstractValue>> data_;
};
// AbstractValues takes ownership of values that are moved into it.
TEST_F(AbstractStateTest, OwnedState) {
  AbstractValues xa(std::move(data_));
  EXPECT_EQ(UnpackIntValue(xa.get_value(0)), 42);
  EXPECT_EQ(UnpackIntValue(xa.get_value(1)), 76);
}
// AbstractValues can instead refer to values owned elsewhere.
TEST_F(AbstractStateTest, UnownedState) {
  std::vector<AbstractValue*> borrowed{data_[0].get(), data_[1].get()};
  AbstractValues xa(std::move(borrowed));
  EXPECT_EQ(UnpackIntValue(xa.get_value(0)), 42);
  EXPECT_EQ(UnpackIntValue(xa.get_value(1)), 76);
}
// The single-value convenience constructor yields a size-one collection.
TEST_F(AbstractStateTest, SingleValueConstructor) {
  AbstractValues xa(PackValue<int>(1000));
  ASSERT_EQ(xa.size(), 1);
  EXPECT_EQ(UnpackIntValue(xa.get_value(0)), 1000);
}
// A clone preserves the values held by the original collection.
TEST_F(AbstractStateTest, Clone) {
  AbstractValues xa(std::move(data_));
  const std::unique_ptr<AbstractValues> copy = xa.Clone();
  EXPECT_EQ(UnpackIntValue(copy->get_value(0)), 42);
  EXPECT_EQ(UnpackIntValue(copy->get_value(1)), 76);
}
} // namespace
} // namespace systems
} // namespace drake
| 0 |
/home/johnshepherd/drake/systems/framework | /home/johnshepherd/drake/systems/framework/images/cache_doxygen_trackers.dot | digraph {
  // Lay the graph out top-to-bottom.
  rankdir=TB;

  // Declare all of our nodes and their visual shape. Each statement here is the
  // same rank (i.e., one row on the visual layout).
  // Diagram-context trackers use "folder" shapes...
  node[shape="folder",style="bold"];
  { rank=same; diag_xcdot; diag_ke; diag_pc; diag_pnc; diag_pe; }
  diag_all_sources;
  diag_kinematics;
  diag_configuration;
  { rank=same; diag_x; diag_p; diag_a; diag_t; diag_u; super_diag_u_i; }
  { rank=same; diag_xc; diag_xd; diag_xa; diag_pn; diag_pa; }
  { rank=same; diag_q; diag_v; diag_z; diag_y; diag_u_i; diag_u_fixed; }
  // ...while leaf-context trackers use rounded rectangles.
  node[shape="rect",style="rounded"];
  { rank=same; leaf_xcdot; leaf_ke; leaf_pc; leaf_pnc; leaf_pe; }
  leaf_all_sources;
  leaf_kinematics;
  leaf_configuration;
  { rank=same; leaf_x; leaf_p; leaf_a; leaf_t; leaf_u; }
  { rank=same; leaf_xc; leaf_xd; leaf_xa; leaf_pn; leaf_pa; }
  { rank=same; leaf_q; leaf_v; leaf_z; leaf_xd_i_all; leaf_xa_i_all;
    leaf_pn_i_all; leaf_pa_i_all; leaf_y; leaf_u_i; leaf_u_fixed; }

  // Tweak the top-to-bottom order, beyond what's implied by the underlying
  // graph rankings.
  // - Stack diag atop leaf, don't intermingle their rows.
  { rank=same; leaf_xcdot; leaf_xcdot_invis[style=invis,label=""]; }
  diag_u_i -> leaf_xcdot_invis[style=invis];
  // - Stack ..._all_sources atop ..._kinematics, not as a sibling.
  { rank=same; diag_kinematics; dkinvis[style=invis,label=""]; }
  { rank=same; leaf_kinematics; lkinvis[style=invis,label=""]; }
  diag_all_sources -> dkinvis[style=invis];
  leaf_all_sources -> lkinvis[style=invis];

  // Tweak the left-to-right order, beyond the default edge-routing heuristics.
  leaf_all_sources[ordering="in"]; diag_all_sources[ordering="in"];
  leaf_x[ordering="out"]; diag_x[ordering="out"];
  leaf_p[ordering="out"]; diag_p[ordering="out"];
  leaf_xc[ordering="out"]; diag_xc[ordering="out"];
  leaf_z -> leaf_xd_i_all[style=invis];

  // This list exactly mirrors the "Predefined dependency tickets" table.
  // We omit some nodes for conciseness.
  // (omitted) leaf_nothing
  leaf_t[label="time (t)"];
  leaf_a[label="accuracy (a)"];
  leaf_q[label="q"];
  leaf_v[label="v"];
  leaf_z[label="z"];
  leaf_xc[label="xc"];
  leaf_xd[label="xd"]; leaf_xd_i_all[label="xd_i"];
  leaf_xa[label="xa"]; leaf_xa_i_all[label="xa_i"];
  leaf_x[label="state (x)"];
  leaf_pn[label="pn"]; leaf_pn_i_all[label="pn_i"];
  leaf_pa[label="pa"]; leaf_pa_i_all[label="pa_i"];
  leaf_p[label="parameters (p)"];
  leaf_u[label="input (u)"];
  leaf_all_sources[label="all_sources"];
  leaf_configuration[label="configuration"];
  leaf_kinematics[label="kinematics"];
  leaf_xcdot[label="xcdot"];
  leaf_pe[label="pe"];
  leaf_ke[label="ke"];
  leaf_pc[label="pc"];
  leaf_pnc[label="pnc"];
  // (omitted) leaf_pn_i
  // (omitted) leaf_pa_i
  // (omitted) leaf_xd_i
  // (omitted) leaf_xa_i
  leaf_u_i[label="u_i"];
  leaf_y[label="y_j",style=dotted];
  leaf_u_fixed[label="fixed"];
  // (omitted) leaf_c_i

  // Leaf-context subscription edges (tracker -> prerequisite).
  leaf_xc -> { leaf_q leaf_v leaf_z };
  leaf_xd -> leaf_xd_i_all[color="black:white:black"];
  leaf_xa -> leaf_xa_i_all[color="black:white:black"];
  leaf_x -> { leaf_xc leaf_xd leaf_xa };
  leaf_pn -> leaf_pn_i_all[color="black:white:black"];
  leaf_pa -> leaf_pa_i_all[color="black:white:black"];
  leaf_p -> { leaf_pn leaf_pa };
  leaf_u -> leaf_u_i[color="black:white:black"];
  leaf_all_sources -> { leaf_x, leaf_p, leaf_a, leaf_t, leaf_u };
  leaf_configuration -> { leaf_q, leaf_p, leaf_a };
  leaf_configuration -> leaf_z[style="dashed"];   // Footnote 3.
  leaf_configuration -> leaf_xd[style="dashed"];  // Footnote 3.
  leaf_configuration -> leaf_xa[style="dashed"];  // Footnote 3.
  leaf_kinematics -> { leaf_configuration, leaf_v };
  leaf_xcdot -> leaf_all_sources;
  leaf_pe -> leaf_all_sources;
  leaf_ke -> leaf_all_sources;
  leaf_pc -> leaf_all_sources;
  leaf_pnc -> leaf_all_sources;
  leaf_y -> leaf_u_i[color="green",dir=back,minlen=2];     // Footnote 4.
  leaf_u_i -> leaf_u_fixed[color="green",minlen=2];        // Footnote 4.
  leaf_u_i -> diag_u_i[color="green",minlen=2];            // Footnote 4.

  // This list repeats the above stanza but with "leaf_" renamed to "diag_",
  // and with the "footnote 2" edges removed.
  // (omitted) leaf_nothing
  diag_t[label="time (t)"];
  diag_a[label="accuracy (a)"];
  diag_q[label="q"];
  diag_v[label="v"];
  diag_z[label="z"];
  diag_xc[label="xc"];
  diag_xd[label="xd"];
  diag_xa[label="xa"];
  diag_x[label="state (x)"];
  diag_pn[label="pn"];
  diag_pa[label="pa"];
  diag_p[label="parameters (p)"];
  diag_u[label="input (u)"];
  diag_all_sources[label="all_sources"];
  diag_configuration[label="configuration"];
  diag_kinematics[label="kinematics"];
  diag_xcdot[label="xcdot"];
  diag_pe[label="pe"];
  diag_ke[label="ke"];
  diag_pc[label="pc"];
  diag_pnc[label="pnc"];
  diag_u_i[label="u_i"];
  diag_y[label="y_j",style=dotted];
  diag_u_fixed[label="fixed"];
  // (omitted) diag_c_i

  // Diagram-context subscription edges (tracker -> prerequisite).
  diag_xc -> { diag_q diag_v diag_z };
  diag_x -> { diag_xc diag_xd diag_xa };
  diag_p -> { diag_pn diag_pa };
  diag_u -> diag_u_i[color="black:white:black"];
  diag_all_sources -> { diag_x, diag_p, diag_a, diag_t, diag_u };
  diag_configuration -> { diag_q, diag_p, diag_a };
  diag_configuration -> diag_xd[style="dashed"];  // Footnote 3.
  diag_configuration -> diag_xa[style="dashed"];  // Footnote 3.
  diag_kinematics -> { diag_configuration, diag_v };
  diag_xcdot -> diag_all_sources[style="dashed"];  // Footnote 5.
  diag_pe -> diag_all_sources[style="dashed"];     // Footnote 5.
  diag_ke -> diag_all_sources[style="dashed"];     // Footnote 5.
  diag_pc -> diag_all_sources[style="dashed"];     // Footnote 5.
  diag_pnc -> diag_all_sources[style="dashed"];    // Footnote 5.
  diag_y -> diag_u_i[color="green",dir=back,minlen=2];     // Footnote 4.
  diag_u_i -> diag_u_fixed[color="green",minlen=2];        // Footnote 4.
  diag_u_i -> super_diag_u_i[color="green",minlen=2];      // Footnote 4.
  super_diag_u_i[shape=plaintext,label="... parent diagram u_i ..."];

  // This list exactly mirrors the "Diagram-specific implementation" table, for
  // the "Subscribes to" column.
  diag_q -> leaf_q[color="blue"];
  diag_v -> leaf_v[color="blue"];
  diag_z -> leaf_z[color="blue"];
  diag_xd -> leaf_xd[color="blue"];
  diag_xa -> leaf_xa[color="blue"];
  diag_pn -> leaf_pn[color="blue"];
  diag_pa -> leaf_pa[color="blue"];
  diag_xcdot -> leaf_xcdot[color="blue"];
  diag_pe -> leaf_pe[color="blue"];
  diag_ke -> leaf_ke[color="blue"];
  diag_pc -> leaf_pc[color="blue"];
  diag_pnc -> leaf_pnc[color="blue"];

  // This list exactly mirrors the "Diagram-specific implementation" table, for
  // the "Notifications send" column.
  diag_t -> leaf_t[dir="back",color="red",style="dotted"];
  diag_a -> leaf_a[dir="back",color="red",style="dotted"];
  diag_q -> leaf_q[dir="back",color="red",style="dotted"];
  diag_v -> leaf_v[dir="back",color="red",style="dotted"];
  diag_z -> leaf_z[dir="back",color="red",style="dotted"];
  diag_xd -> leaf_xd[dir="back",color="red",style="dotted"];
  diag_xa -> leaf_xa[dir="back",color="red",style="dotted"];
  diag_pn -> leaf_pn[dir="back",color="red",style="dotted"];
  diag_pa -> leaf_pa[dir="back",color="red",style="dotted"];
} // digraph
| 0 |
/home/johnshepherd/drake/systems/framework | /home/johnshepherd/drake/systems/framework/images/system_context_cache.svg | <?xml version="1.0" encoding="UTF-8" standalone="no"?>
<svg
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:cc="http://creativecommons.org/ns#"
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:svg="http://www.w3.org/2000/svg"
xmlns="http://www.w3.org/2000/svg"
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
style="fill:none;stroke:none;stroke-linecap:square;stroke-miterlimit:10"
height="450"
width="850"
id="svg349"
stroke-miterlimit="10"
viewBox="0 0 850 450"
version="1.1"
inkscape:version="0.91 r13725"
sodipodi:docname="system_context_cache.svg">
<sodipodi:namedview
pagecolor="#ffffff"
bordercolor="#666666"
borderopacity="1"
objecttolerance="10"
gridtolerance="10"
guidetolerance="10"
inkscape:pageopacity="0"
inkscape:pageshadow="2"
inkscape:window-width="3763"
inkscape:window-height="1951"
id="namedview3766"
showgrid="false"
inkscape:zoom="3.8729412"
inkscape:cx="488.16923"
inkscape:cy="225"
inkscape:window-x="65"
inkscape:window-y="24"
inkscape:window-maximized="1"
inkscape:current-layer="svg349" />
<metadata
id="metadata355">
<rdf:RDF>
<cc:Work
rdf:about="">
<dc:format>image/svg+xml</dc:format>
<dc:type
rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
<dc:title />
</cc:Work>
</rdf:RDF>
</metadata>
<defs
id="defs353" />
<clipPath
id="p.0">
<path
style="clip-rule:nonzero"
id="path2"
d="M 0,0 H 960 V 720 H 0 Z" />
</clipPath>
<path
d="m -14.19685,-255.66407 h 960 v 720 h -960 z"
id="path5"
style="fill:#000000;fill-opacity:0;fill-rule:evenodd" />
<path
d="m 98.24147,100.89498 h 74.3622"
id="path7"
style="fill:#000000;fill-opacity:0;fill-rule:evenodd" />
<path
d="m 98.24147,100.89498 h 68.3622"
id="path9"
style="fill-rule:evenodd;stroke:#1155cc;stroke-width:1;stroke-linecap:butt;stroke-linejoin:round" />
<path
d="m 166.60367,102.54671 4.5381,-1.65173 -4.5381,-1.651737 z"
id="path11"
style="fill:#1155cc;fill-rule:evenodd;stroke:#1155cc;stroke-width:1;stroke-linecap:butt" />
<path
d="m 98.24147,121.6955 h 74.3622"
id="path13"
style="fill:#000000;fill-opacity:0;fill-rule:evenodd" />
<path
d="m 98.24147,121.6955 h 68.3622"
id="path15"
style="fill-rule:evenodd;stroke:#1155cc;stroke-width:1;stroke-linecap:butt;stroke-linejoin:round" />
<path
d="m 166.60367,123.34723 4.5381,-1.65173 -4.5381,-1.65172 z"
id="path17"
style="fill:#1155cc;fill-rule:evenodd;stroke:#1155cc;stroke-width:1;stroke-linecap:butt" />
<path
d="M 241.59581,120.09708 H 702.60368 V 382.90024 H 241.59581 Z"
id="path19"
style="fill:#d9d9d9;fill-rule:evenodd" />
<path
d="M 241.59581,120.09708 H 702.60368 V 382.90024 H 241.59581 Z"
id="path21"
style="fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:round" />
<path
d="m 122.80315,143.13645 49.79527,-0.62991"
id="path23"
style="fill:#000000;fill-opacity:0;fill-rule:evenodd" />
<path
d="m 122.80315,143.13645 43.79575,-0.55403"
id="path25"
style="fill-rule:evenodd;stroke:#1155cc;stroke-width:1;stroke-linecap:butt;stroke-linejoin:round" />
<path
d="m 166.6198,144.23403 4.51685,-1.709 -4.55864,-1.59421 z"
id="path27"
style="fill:#1155cc;fill-rule:evenodd;stroke:#1155cc;stroke-width:1;stroke-linecap:butt" />
<path
d="M 98.80315,67.136453 172,67.640393"
id="path29"
style="fill:#000000;fill-opacity:0;fill-rule:evenodd" />
<path
d="m 98.80315,67.136453 67.19699,0.46263"
id="path31"
style="fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:round" />
<path
d="m 165.98877,69.250773 4.54936,-1.62044 -4.52662,-1.68294 z"
id="path33"
style="fill:#000000;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt" />
<path
d="M 174.80315,43.136453 H 635.81102 V 315.13645 H 174.80315 Z"
id="path35"
style="fill:#cfe2f3;fill-rule:evenodd" />
<path
d="M 174.80315,43.136453 H 635.81102 V 315.13645 H 174.80315 Z"
id="path37"
style="fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:round" />
<path
d="m 487.05775,348.49603 h 327.4331 v 21.60632 h -327.4331 z"
id="path39"
style="fill:#000000;fill-opacity:0;fill-rule:evenodd" />
<path
d="m 234.05248,4.4960327 h 327.4331 V 26.102333 h -327.4331 z"
id="path43"
style="fill:#000000;fill-opacity:0;fill-rule:evenodd" />
<path
d="m 636.80315,159.13647 94.99213,1.00787"
id="path47"
style="fill:#000000;fill-opacity:0;fill-rule:evenodd" />
<path
d="m 636.80315,159.13647 88.99243,0.94421"
id="path49"
style="fill-rule:evenodd;stroke:#ff0000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:round" />
<path
d="m 725.77805,161.73233 4.55542,-1.60352 -4.52032,-1.69976 z"
id="path51"
style="fill:#ff0000;fill-rule:evenodd;stroke:#ff0000;stroke-width:1;stroke-linecap:butt" />
<path
d="m 609.91698,181.09579 120.8819,0.0315"
id="path53"
style="fill:#000000;fill-opacity:0;fill-rule:evenodd" />
<path
d="m 609.91698,181.09579 114.8819,0.0299"
id="path55"
style="fill-rule:evenodd;stroke:#ff0000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:round" />
<path
d="m 724.79845,182.77743 4.53851,-1.65054 -4.53766,-1.65293 z"
id="path57"
style="fill:#ff0000;fill-rule:evenodd;stroke:#ff0000;stroke-width:1;stroke-linecap:butt" />
<path
d="m 636.60369,206.49499 h 96"
id="path59"
style="fill:#000000;fill-opacity:0;fill-rule:evenodd" />
<path
d="m 636.60369,206.49499 h 90"
id="path61"
style="fill-rule:evenodd;stroke:#ff0000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:round" />
<path
d="m 726.60369,208.14673 4.53809,-1.65173 -4.53809,-1.65174 z"
id="path63"
style="fill:#ff0000;fill-rule:evenodd;stroke:#ff0000;stroke-width:1;stroke-linecap:butt" />
<path
d="m 608.37841,96.335933 113.95276,-0.18898"
id="path65"
style="fill:#000000;fill-opacity:0;fill-rule:evenodd" />
<path
d="m 608.37835,96.335933 107.95276,-0.17903"
id="path67"
style="fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:round" />
<path
d="m 716.33385,97.808633 4.53534,-1.65926 -4.54077,-1.64419 z"
id="path69"
style="fill:#000000;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt" />
<path
d="M 7.041996,96.687633 H 103.042 V 137.88448 H 7.041996 Z"
id="path71"
style="fill:#000000;fill-opacity:0;fill-rule:evenodd" />
<path
d="m 607.26245,65.574783 114.55121,-0.44095"
id="path81"
style="fill:#000000;fill-opacity:0;fill-rule:evenodd" />
<path
d="m 607.26245,65.574783 108.55127,-0.41785"
id="path83"
style="fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:round" />
<path
d="m 715.82005,66.808653 4.53168,-1.66919 -4.54438,-1.63425 z"
id="path85"
style="fill:#000000;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt" />
<path
d="m 4,43.136453 h 100 v 41.19685 H 4 Z"
id="path87"
style="fill:#000000;fill-opacity:0;fill-rule:evenodd" />
<path
d="m 738.71655,165.13909 h 96 v 41.19684 h -96 z"
id="path95"
style="fill:#000000;fill-opacity:0;fill-rule:evenodd" />
<path
d="M 733.39635,50.894983 H 848.60894 V 115.68238 H 733.39635 Z"
id="path105"
style="fill:#000000;fill-opacity:0;fill-rule:evenodd" />
<path
d="M 262.60369,350.1711 120.8084,350.1396"
id="path111"
style="fill:#000000;fill-opacity:0;fill-rule:evenodd" />
<path
d="M 262.60369,350.1711 120.8084,350.1396"
id="path113"
style="fill-rule:evenodd;stroke:#1155cc;stroke-width:1;stroke-linecap:butt;stroke-linejoin:round" />
<path
d="m 121.80315,350.13647 1.00787,-208.00002"
id="path115"
style="fill:#000000;fill-opacity:0;fill-rule:evenodd" />
<path
d="m 121.80315,350.13647 1.00787,-208.00002"
id="path117"
style="fill-rule:evenodd;stroke:#1155cc;stroke-width:1;stroke-linecap:butt;stroke-linejoin:round" />
<path
d="m 181.31234,233.62991 v 0 c 0,-8.13208 31.74194,-14.72443 70.89764,-14.72443 v 0 c 39.15567,0 70.89765,6.59235 70.89765,14.72443 v 0 c 0,8.13205 -31.74198,14.7244 -70.89765,14.7244 v 0 c -39.1557,0 -70.89764,-6.59235 -70.89764,-14.7244 z"
id="path119"
style="fill:#d9d9d9;fill-rule:evenodd" />
<path
d="m 181.31234,233.62991 v 0 c 0,-8.13208 31.74194,-14.72443 70.89764,-14.72443 v 0 c 39.15567,0 70.89765,6.59235 70.89765,14.72443 v 0 c 0,8.13205 -31.74198,14.7244 -70.89765,14.7244 v 0 c -39.1557,0 -70.89764,-6.59235 -70.89764,-14.7244 z"
id="path121"
style="fill-rule:evenodd;stroke:#b45f06;stroke-width:2;stroke-linecap:butt;stroke-linejoin:round" />
<path
d="m 170.90288,67.136453 v 0 c 0,-5.96642 9.31389,-10.80315 20.80316,-10.80315 v 0 c 11.48926,0 20.80313,4.83674 20.80313,10.80315 v 0 c 0,5.96642 -9.31387,10.80315 -20.80313,10.80315 v 0 c -11.48927,0 -20.80316,-4.83674 -20.80316,-10.80315 z"
id="path123"
style="fill:#d9d9d9;fill-rule:evenodd" />
<path
d="m 170.90288,67.136453 v 0 c 0,-5.96642 9.31389,-10.80315 20.80316,-10.80315 v 0 c 11.48926,0 20.80313,4.83674 20.80313,10.80315 v 0 c 0,5.96642 -9.31387,10.80315 -20.80313,10.80315 v 0 c -11.48927,0 -20.80316,-4.83674 -20.80316,-10.80315 z"
id="path125"
style="fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:round" />
<path
d="m 170.90288,101.13645 v 0 c 0,-5.966417 9.31389,-10.803147 20.80316,-10.803147 v 0 c 11.48926,0 20.80313,4.83674 20.80313,10.803147 v 0 c 0,5.96642 -9.31387,10.80315 -20.80313,10.80315 v 0 c -11.48927,0 -20.80316,-4.83674 -20.80316,-10.80315 z"
id="path127"
style="fill:#d9d9d9;fill-rule:evenodd" />
<path
d="m 170.90288,101.13645 v 0 c 0,-5.966417 9.31389,-10.803147 20.80316,-10.803147 v 0 c 11.48926,0 20.80313,4.83674 20.80313,10.803147 v 0 c 0,5.96642 -9.31387,10.80315 -20.80313,10.80315 v 0 c -11.48927,0 -20.80316,-4.83674 -20.80316,-10.80315 z"
id="path129"
style="fill-rule:evenodd;stroke:#1155cc;stroke-width:1;stroke-linecap:butt;stroke-linejoin:round" />
<path
d="m 170.90288,122.13645 v 0 c 0,-5.96642 9.31389,-10.80315 20.80316,-10.80315 v 0 c 11.48926,0 20.80313,4.83674 20.80313,10.80315 v 0 c 0,5.96642 -9.31387,10.80315 -20.80313,10.80315 v 0 c -11.48927,0 -20.80316,-4.83674 -20.80316,-10.80315 z"
id="path131"
style="fill:#d9d9d9;fill-rule:evenodd" />
<path
d="m 170.90288,122.13645 v 0 c 0,-5.96642 9.31389,-10.80315 20.80316,-10.80315 v 0 c 11.48926,0 20.80313,4.83674 20.80313,10.80315 v 0 c 0,5.96642 -9.31387,10.80315 -20.80313,10.80315 v 0 c -11.48927,0 -20.80316,-4.83674 -20.80316,-10.80315 z"
id="path133"
style="fill-rule:evenodd;stroke:#1155cc;stroke-width:1;stroke-linecap:butt;stroke-linejoin:round" />
<path
d="m 170.90288,143.13645 v 0 c 0,-5.96642 9.31389,-10.80315 20.80316,-10.80315 v 0 c 11.48926,0 20.80313,4.83674 20.80313,10.80315 v 0 c 0,5.96642 -9.31387,10.80315 -20.80313,10.80315 v 0 c -11.48927,0 -20.80316,-4.83673 -20.80316,-10.80315 z"
id="path135"
style="fill:#d9d9d9;fill-rule:evenodd" />
<path
d="m 170.90288,143.13645 v 0 c 0,-5.96642 9.31389,-10.80315 20.80316,-10.80315 v 0 c 11.48926,0 20.80313,4.83674 20.80313,10.80315 v 0 c 0,5.96642 -9.31387,10.80315 -20.80313,10.80315 v 0 c -11.48927,0 -20.80316,-4.83673 -20.80316,-10.80315 z"
id="path137"
style="fill-rule:evenodd;stroke:#1155cc;stroke-width:1;stroke-linecap:butt;stroke-linejoin:round" />
<path
d="m 262.60369,350.02883 v 0 c 0,-8.13205 39.62454,-14.7244 88.50394,-14.7244 v 0 c 48.87936,0 88.50394,6.59235 88.50394,14.7244 v 0 c 0,8.13208 -39.62457,14.72443 -88.50394,14.72443 v 0 c -48.87939,0 -88.50394,-6.59235 -88.50394,-14.72443 z"
id="path139"
style="fill:#d9d9d9;fill-rule:evenodd" />
<path
d="m 262.60369,350.02883 v 0 c 0,-8.13205 39.62454,-14.7244 88.50394,-14.7244 v 0 c 48.87936,0 88.50394,6.59235 88.50394,14.7244 v 0 c 0,8.13208 -39.62457,14.72443 -88.50394,14.72443 v 0 c -48.87939,0 -88.50394,-6.59235 -88.50394,-14.72443 z"
id="path141"
style="fill-rule:evenodd;stroke:#1155cc;stroke-width:1;stroke-linecap:butt;stroke-linejoin:round" />
<path
d="m 187.55118,221.5039 h 125.51183 v 21.60632 H 187.55118 Z"
id="path145"
style="fill:#000000;fill-opacity:0;fill-rule:evenodd" />
<path
d="m 181.31234,273.62991 v 0 c 0,-8.13208 31.74194,-14.72443 70.89764,-14.72443 v 0 c 39.15567,0 70.89765,6.59235 70.89765,14.72443 v 0 c 0,8.13205 -31.74198,14.7244 -70.89765,14.7244 v 0 c -39.1557,0 -70.89764,-6.59235 -70.89764,-14.7244 z"
id="path153"
style="fill:#d9d9d9;fill-rule:evenodd" />
<path
d="m 181.31234,273.62991 v 0 c 0,-8.13208 31.74194,-14.72443 70.89764,-14.72443 v 0 c 39.15567,0 70.89765,6.59235 70.89765,14.72443 v 0 c 0,8.13205 -31.74198,14.7244 -70.89765,14.7244 v 0 c -39.1557,0 -70.89764,-6.59235 -70.89764,-14.7244 z"
id="path155"
style="fill-rule:evenodd;stroke:#38761d;stroke-width:2;stroke-linecap:butt;stroke-linejoin:round" />
<path
d="m 191.55118,263.5039 h 125.51183 v 21.60632 H 191.55118 Z"
id="path157"
style="fill:#000000;fill-opacity:0;fill-rule:evenodd" />
<path
d="m 338.91088,92.792633 v 0 c 0,-5.96642 8.32016,-10.80315 18.58359,-10.80315 v 0 c 10.26343,0 18.58359,4.83674 18.58359,10.80315 v 0 c 0,5.96642 -8.32016,10.803147 -18.58359,10.803147 v 0 c -10.26343,0 -18.58359,-4.836737 -18.58359,-10.803147 z"
id="path165"
style="fill:#d9d9d9;fill-rule:evenodd" />
<path
d="m 338.91088,92.792633 v 0 c 0,-5.96642 8.32016,-10.80315 18.58359,-10.80315 v 0 c 10.26343,0 18.58359,4.83674 18.58359,10.80315 v 0 c 0,5.96642 -8.32016,10.803147 -18.58359,10.803147 v 0 c -10.26343,0 -18.58359,-4.836737 -18.58359,-10.803147 z"
id="path167"
style="fill-rule:evenodd;stroke:#ff0000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:round" />
<path
d="M 287.81282,81.989483 H 356.6227 V 103.59577 H 287.81282 Z"
id="path169"
style="fill:#d9d9d9;fill-rule:evenodd" />
<path
d="M 287.81282,81.989483 H 356.6227 V 103.59577 H 287.81282 Z"
id="path171"
style="fill-rule:evenodd;stroke:#ff0000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:round" />
<path
d="m 353.64492,84.259823 h 5.97122 v 17.007867 h -5.97122 z"
id="path173"
style="fill:#d9d9d9;fill-rule:evenodd" />
<path
d="m 572.74981,181.09579 v 0 c 0,-5.96643 8.32013,-10.80316 18.58356,-10.80316 v 0 c 10.26342,0 18.58361,4.83673 18.58361,10.80316 v 0 c 0,5.9664 -8.32019,10.80313 -18.58361,10.80313 v 0 c -10.26343,0 -18.58356,-4.83673 -18.58356,-10.80313 z"
id="path175"
style="fill:#d9d9d9;fill-rule:evenodd" />
<path
d="m 572.74981,181.09579 v 0 c 0,-5.96643 8.32013,-10.80316 18.58356,-10.80316 v 0 c 10.26342,0 18.58361,4.83673 18.58361,10.80316 v 0 c 0,5.9664 -8.32019,10.80313 -18.58361,10.80313 v 0 c -10.26343,0 -18.58356,-4.83673 -18.58356,-10.80313 z"
id="path177"
style="fill-rule:evenodd;stroke:#ff0000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:round" />
<path
d="m 521.65175,170.29263 h 68.80988 v 21.60629 h -68.80988 z"
id="path179"
style="fill:#d9d9d9;fill-rule:evenodd" />
<path
d="m 521.65175,170.29263 h 68.80988 v 21.60629 h -68.80988 z"
id="path181"
style="fill-rule:evenodd;stroke:#ff0000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:round" />
<path
d="m 587.48381,172.56295 h 5.97119 v 17.00787 h -5.97119 z"
id="path183"
style="fill:#d9d9d9;fill-rule:evenodd" />
<path
d="m 446.88951,180.70339 v 0 c 0,-5.96643 8.32016,-10.80316 18.58362,-10.80316 v 0 c 10.26343,0 18.58356,4.83673 18.58356,10.80316 v 0 c 0,5.9664 -8.32013,10.80313 -18.58356,10.80313 v 0 c -10.26346,0 -18.58362,-4.83673 -18.58362,-10.80313 z"
id="path185"
style="fill:#d9d9d9;fill-rule:evenodd" />
<path
d="m 446.88951,180.70339 v 0 c 0,-5.96643 8.32016,-10.80316 18.58362,-10.80316 v 0 c 10.26343,0 18.58356,4.83673 18.58356,10.80316 v 0 c 0,5.9664 -8.32013,10.80313 -18.58356,10.80313 v 0 c -10.26346,0 -18.58362,-4.83673 -18.58362,-10.80313 z"
id="path187"
style="fill-rule:evenodd;stroke:#ff0000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:round" />
<path
d="m 395.79149,169.90023 h 68.80981 v 21.60629 h -68.80981 z"
id="path189"
style="fill:#d9d9d9;fill-rule:evenodd" />
<path
d="m 395.79149,169.90023 h 68.80981 v 21.60629 h -68.80981 z"
id="path191"
style="fill-rule:evenodd;stroke:#ff0000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:round" />
<path
d="m 461.62355,172.17059 h 5.97122 v 17.00787 h -5.97122 z"
id="path193"
style="fill:#d9d9d9;fill-rule:evenodd" />
<path
d="m 571.21115,96.335933 v 0 c 0,-5.96642 8.32019,-10.80315 18.58362,-10.80315 v 0 c 10.26343,0 18.58362,4.83674 18.58362,10.80315 v 0 c 0,5.966417 -8.32019,10.803147 -18.58362,10.803147 v 0 c -10.26343,0 -18.58362,-4.83674 -18.58362,-10.803147 z"
id="path195"
style="fill:#d9d9d9;fill-rule:evenodd" />
<path
d="m 571.21115,96.335933 v 0 c 0,-5.96642 8.32019,-10.80315 18.58362,-10.80315 v 0 c 10.26343,0 18.58362,4.83674 18.58362,10.80315 v 0 c 0,5.966417 -8.32019,10.803147 -18.58362,10.803147 v 0 c -10.26343,0 -18.58362,-4.83674 -18.58362,-10.803147 z"
id="path197"
style="fill-rule:evenodd;stroke:#ff0000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:round" />
<path
d="m 520.11315,85.532783 h 68.80987 v 21.606287 h -68.80987 z"
id="path199"
style="fill:#d9d9d9;fill-rule:evenodd" />
<path
d="m 520.11315,85.532783 h 68.80987 v 21.606287 h -68.80987 z"
id="path201"
style="fill-rule:evenodd;stroke:#ff0000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:round" />
<path
d="m 585.94525,87.803133 h 5.97119 V 104.811 h -5.97119 z"
id="path203"
style="fill:#d9d9d9;fill-rule:evenodd" />
<path
d="m 570.87511,65.099713 v 0 c 0,-5.96642 8.26556,-10.80315 18.46161,-10.80315 v 0 c 10.19604,0 18.46161,4.83674 18.46161,10.80315 v 0 c 0,5.96642 -8.26557,10.80315 -18.46161,10.80315 v 0 c -10.19605,0 -18.46161,-4.83674 -18.46161,-10.80315 z"
id="path205"
style="fill:#d9d9d9;fill-rule:evenodd" />
<path
d="m 570.87511,65.099713 v 0 c 0,-5.96642 8.26556,-10.80315 18.46161,-10.80315 v 0 c 10.19604,0 18.46161,4.83674 18.46161,10.80315 v 0 c 0,5.96642 -8.26557,10.80315 -18.46161,10.80315 v 0 c -10.19605,0 -18.46161,-4.83674 -18.46161,-10.80315 z"
id="path207"
style="fill-rule:evenodd;stroke:#ff0000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:round" />
<path
d="m 520.11255,54.296563 h 68.35815 v 21.60629 h -68.35815 z"
id="path209"
style="fill:#d9d9d9;fill-rule:evenodd" />
<path
d="m 520.11255,54.296563 h 68.35815 v 21.60629 h -68.35815 z"
id="path211"
style="fill-rule:evenodd;stroke:#ff0000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:round" />
<path
d="m 585.51245,56.566903 h 5.93201 v 17.00787 h -5.93201 z"
id="path213"
style="fill:#d9d9d9;fill-rule:evenodd" />
<path
d="m 449.88951,132.49866 v 0 c 0,-5.96642 8.32016,-10.80316 18.58362,-10.80316 v 0 c 10.26343,0 18.58356,4.83674 18.58356,10.80316 v 0 c 0,5.96642 -8.32013,10.80315 -18.58356,10.80315 v 0 c -10.26346,0 -18.58362,-4.83674 -18.58362,-10.80315 z"
id="path215"
style="fill:#d9d9d9;fill-rule:evenodd" />
<path
d="m 449.88951,132.49866 v 0 c 0,-5.96642 8.32016,-10.80316 18.58362,-10.80316 v 0 c 10.26343,0 18.58356,4.83674 18.58356,10.80316 v 0 c 0,5.96642 -8.32013,10.80315 -18.58356,10.80315 v 0 c -10.26346,0 -18.58362,-4.83674 -18.58362,-10.80315 z"
id="path217"
style="fill-rule:evenodd;stroke:#ff0000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:round" />
<path
d="m 398.79149,121.6955 h 68.80981 v 21.60631 h -68.80981 z"
id="path219"
style="fill:#d9d9d9;fill-rule:evenodd" />
<path
d="m 398.79149,121.6955 h 68.80981 v 21.60631 h -68.80981 z"
id="path221"
style="fill-rule:evenodd;stroke:#ff0000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:round" />
<path
d="m 464.62358,123.96586 h 5.97119 v 17.00787 h -5.97119 z"
id="path223"
style="fill:#d9d9d9;fill-rule:evenodd" />
<path
d="m 487.05669,132.49866 c 8.2641,0 12.39429,-9.03938 16.5282,-18.07875 4.13385,-9.03937 8.27136,-18.078737 16.54266,-18.078737"
id="path225"
style="fill:#000000;fill-opacity:0;fill-rule:evenodd" />
<path
d="m 487.05669,132.49866 c 8.2641,0 12.39429,-9.03938 16.52814,-18.07875 2.06695,-4.51968 4.13476,-9.03937 6.71985,-12.42912 0.64624,-0.84745 1.32482,-1.62427 2.04376,-2.312817 0.35949,-0.34426 0.72912,-0.66648 1.1098,-0.9644 0.19037,-0.14898 0.38348,-0.29187 0.57947,-0.42841 0.098,-0.0683 0.19678,-0.13496 0.29626,-0.20003 l 0.0443,-0.028"
id="path227"
style="fill-rule:evenodd;stroke:#ff0000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:round" />
<path
d="m 514.85055,99.639873 3.87616,-2.8806 -4.82092,-0.28488 z"
id="path229"
style="fill-rule:evenodd;stroke:#ff0000;stroke-width:1;stroke-linecap:butt" />
<path
d="m 487.05669,132.49866 c 37.18109,0 55.77167,6.40944 74.36224,12.81889 18.59052,6.40946 37.18109,12.81891 74.36218,12.81891"
id="path231"
style="fill:#000000;fill-opacity:0;fill-rule:evenodd" />
<path
d="m 487.05669,132.49866 c 37.18115,0 55.77167,6.40944 74.36224,12.81889 9.29529,3.20473 18.59052,6.40946 30.20966,8.81299 5.80951,1.20178 12.20001,2.20325 19.46197,2.9043 3.63098,0.35049 7.4798,0.62591 11.58277,0.81369 2.05151,0.0939 4.1665,0.16586 6.34967,0.21435 l 0.75842,0.0147"
id="path233"
style="fill-rule:evenodd;stroke:#ff0000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:round" />
<path
d="m 629.76519,159.72923 4.55414,-1.60712 -4.52173,-1.69616 z"
id="path235"
style="fill-rule:evenodd;stroke:#ff0000;stroke-width:1;stroke-linecap:butt" />
<path
d="M 322.80315,273.13647 H 612.8189"
id="path237"
style="fill:#000000;fill-opacity:0;fill-rule:evenodd" />
<path
d="M 322.80315,273.13647 H 612.8189"
id="path239"
style="fill-rule:evenodd;stroke:#38761d;stroke-width:2;stroke-linecap:butt;stroke-linejoin:round" />
<path
d="m 323.10762,233.62991 172.6929,-1.48032"
id="path241"
style="fill:#000000;fill-opacity:0;fill-rule:evenodd" />
<path
d="m 323.10762,233.62991 172.6929,-1.48032"
id="path243"
style="fill-rule:evenodd;stroke:#b45f06;stroke-width:2;stroke-linecap:butt;stroke-linejoin:round" />
<path
d="m 212.50917,67.136453 c 18.57483,0 27.86221,5 37.14963,10 9.28738,5 18.5748,10 37.1496,10"
id="path245"
style="fill:#000000;fill-opacity:0;fill-rule:evenodd" />
<path
d="m 212.50917,67.136453 c 18.57483,0 27.86221,5 37.14963,10 4.64367,2.5 9.28738,5 15.09201,6.875 2.90231,0.9375 6.09488,1.71875 9.72275,2.26562 1.81396,0.27344 3.73675,0.48829 5.7865,0.63477 l 0.55127,0.0368"
id="path247"
style="fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:round" />
<path
d="m 280.75963,88.599573 4.58759,-1.50888 -4.48417,-1.79297 z"
id="path249"
style="fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt" />
<path
d="m 212.50917,101.13645 c 18.82678,0 28.24017,-1 37.65356,-1.999997 9.4134,-1 18.82676,-2 37.65354,-2"
id="path251"
style="fill:#000000;fill-opacity:0;fill-rule:evenodd" />
<path
d="m 212.50917,101.13645 c 18.82678,0 28.24017,-1 37.65356,-1.999997 4.7067,-0.5 9.4134,-1 15.29676,-1.375 2.94168,-0.1875 6.17752,-0.34375 9.85461,-0.45313 1.83856,-0.0547 3.78745,-0.0976 5.86499,-0.12693 l 0.6373,-0.008"
id="path253"
style="fill-rule:evenodd;stroke:#1155cc;stroke-width:1;stroke-linecap:butt;stroke-linejoin:round" />
<path
d="m 281.82662,98.825283 4.52777,-1.67978 -4.54822,-1.62363 z"
id="path255"
style="fill-rule:evenodd;stroke:#1155cc;stroke-width:1;stroke-linecap:butt" />
<path
d="m 376.07805,92.792633 c 5.67831,0 8.51804,9.929137 11.3566,19.858257 2.83859,9.92914 5.67605,19.85828 11.35208,19.85828"
id="path257"
style="fill:#000000;fill-opacity:0;fill-rule:evenodd" />
<path
d="m 376.07805,92.792633 c 5.67831,0 8.51804,9.929137 11.35663,19.858257 1.41928,4.96457 2.83829,9.92914 4.612,13.65256 0.44339,0.93086 0.90899,1.78415 1.40225,2.54046 0.0617,0.0945 0.12378,0.18757 0.18631,0.27904 l 0.10193,0.14548"
id="path259"
style="fill-rule:evenodd;stroke:#ff0000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:round" />
<path
d="m 392.84505,130.6585 4.71133,1.06105 -2.92706,-3.8412 z"
id="path261"
style="fill-rule:evenodd;stroke:#ff0000;stroke-width:1;stroke-linecap:butt" />
<path
d="m 376.07805,92.792633 c 36.00861,0 54.01328,-6.92126 72.01721,-13.84253 18.00391,-6.92125 36.00714,-13.84251 72.01428,-13.84251"
id="path263"
style="fill:#000000;fill-opacity:0;fill-rule:evenodd" />
<path
d="m 376.07805,92.792633 c 36.00861,0 54.01328,-6.92126 72.01721,-13.84253 9.00198,-3.46062 18.00372,-6.92125 29.25592,-9.51672 5.6261,-1.29775 11.81476,-2.37918 18.84741,-3.1362 3.5163,-0.37851 7.24359,-0.67591 11.21704,-0.87867 1.9867,-0.10139 4.03498,-0.17912 6.14917,-0.2315 l 0.54511,-0.0118"
id="path265"
style="fill-rule:evenodd;stroke:#ff0000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:round" />
<path
d="m 514.12855,66.826853 4.51923,-1.70279 -4.55646,-1.60046 z"
id="path267"
style="fill-rule:evenodd;stroke:#ff0000;stroke-width:1;stroke-linecap:butt" />
<path
d="m 580.80315,272.13647 c 0,-28.5 13.25195,-48.74802 26.50397,-57 13.25195,-8.25195 26.5039,-4.50787 26.5039,-9.01575"
id="path269"
style="fill:#000000;fill-opacity:0;fill-rule:evenodd" />
<path
d="m 580.80315,272.13647 c 0,-28.5 13.25195,-48.74802 26.50397,-57 6.62597,-4.12598 13.25195,-5.25293 18.22143,-5.9119 0.62116,-0.0824 1.21649,-0.15744 1.78266,-0.2301 0.28314,-0.0364 0.5589,-0.0721 0.82702,-0.10788 l 0.3252,-0.045"
id="path271"
style="fill-rule:evenodd;stroke:#38761d;stroke-width:1;stroke-linecap:butt;stroke-linejoin:round" />
<path
d="m 629.21245,210.31371 3.29565,-3.53006 -4.7937,0.58582 z"
id="path273"
style="fill-rule:evenodd;stroke:#38761d;stroke-width:1;stroke-linecap:butt" />
<path
d="m 212.50917,122.13645 c 46.57483,0 69.86221,4.25197 93.14963,8.50394 23.28738,4.25197 46.5748,8.50393 93.1496,8.50393"
id="path275"
style="fill:#000000;fill-opacity:0;fill-rule:evenodd" />
<path
d="m 212.50917,122.13645 c 46.57483,0 69.86221,4.25197 93.14963,8.50394 11.64368,2.12599 23.28738,4.25197 37.84201,5.84646 7.27731,0.79724 15.28238,1.46161 24.37903,1.92667 4.54831,0.23253 9.36951,0.41524 14.50909,0.53979 2.5698,0.0623 5.21921,0.11004 7.95392,0.14223 0.68366,0.008 1.37265,0.0151 2.06708,0.0212 l 0.3985,0.003"
id="path277"
style="fill-rule:evenodd;stroke:#1155cc;stroke-width:1;stroke-linecap:butt;stroke-linejoin:round" />
<path
d="m 392.80165,140.77135 4.54486,-1.63304 -4.53128,-1.6704 z"
id="path279"
style="fill-rule:evenodd;stroke:#1155cc;stroke-width:1;stroke-linecap:butt" />
<path
d="m 414.25939,233.93173 c 0,-10.60629 3.98425,-15.90945 7.96851,-21.21259 3.98425,-5.30316 7.9685,-10.60629 7.9685,-21.21261"
id="path281"
style="fill:#000000;fill-opacity:0;fill-rule:evenodd" />
<path
d="m 414.25939,233.93173 c 0,-10.60629 3.98425,-15.90945 7.96851,-21.21259 1.99212,-2.65158 3.98425,-5.30316 5.47836,-8.61761 0.74704,-1.65726 1.36957,-3.48019 1.80536,-5.55176 0.0544,-0.25894 0.10602,-0.52176 0.15451,-0.78863 l 0.0479,-0.27402"
id="path283"
style="fill-rule:evenodd;stroke:#b45f06;stroke-width:1;stroke-linecap:butt;stroke-linejoin:round" />
<path
d="m 431.3604,197.6199 -1.28152,-4.65619 -2.01124,4.39063 z"
id="path285"
style="fill-rule:evenodd;stroke:#b45f06;stroke-width:1;stroke-linecap:butt" />
<path
d="m 484.05669,180.70339 c 9.39874,0 14.09674,0.0945 18.79755,0.18896 4.70074,0.0945 9.40435,0.18897 18.80877,0.18897"
id="path287"
style="fill:#000000;fill-opacity:0;fill-rule:evenodd" />
<path
d="m 484.05669,180.70339 c 9.39874,0 14.09674,0.0945 18.79755,0.18896 2.35034,0.0473 4.70147,0.0945 7.6405,0.12992 1.46954,0.0177 3.08606,0.0325 4.92297,0.0428 l 0.2453,0.001"
id="path289"
style="fill-rule:evenodd;stroke:#ff0000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:round" />
<path
d="m 515.65885,182.71783 4.54224,-1.6402 -4.53388,-1.66327 z"
id="path291"
style="fill-rule:evenodd;stroke:#ff0000;stroke-width:1;stroke-linecap:butt" />
<path
d="m 528.81255,273.12728 c 0,-20.24411 3.24414,-30.36615 6.48822,-40.48819 3.24408,-10.12207 6.48816,-20.24411 6.48816,-40.48819"
id="path293"
style="fill:#000000;fill-opacity:0;fill-rule:evenodd" />
<path
d="m 528.81261,273.12728 c 0,-20.24411 3.24408,-30.36615 6.48816,-40.48819 1.62207,-5.06103 3.24408,-10.12207 4.46063,-16.44833 0.60828,-3.16315 1.11517,-6.64261 1.46997,-10.59653 0.17743,-1.97696 0.31684,-4.07254 0.41187,-6.30652 0.0118,-0.27924 0.0231,-0.56064 0.0336,-0.84424 l 0.01,-0.29345"
id="path295"
style="fill-rule:evenodd;stroke:#38761d;stroke-width:1;stroke-linecap:butt;stroke-linejoin:round" />
<path
d="m 543.33831,198.17815 -1.57422,-4.56558 -1.72876,4.50934 z"
id="path297"
style="fill-rule:evenodd;stroke:#38761d;stroke-width:1;stroke-linecap:butt" />
<path
d="m 212.50917,143.13645 c 45.82074,0 68.73202,9.39369 91.64148,18.7874 22.90945,9.39371 45.81705,18.78741 91.63412,18.78741"
id="path299"
style="fill:#000000;fill-opacity:0;fill-rule:evenodd" />
<path
d="m 212.50917,143.13645 c 45.82074,0 68.73202,9.39369 91.64145,18.7874 11.45474,4.69687 22.909,9.39371 37.22672,12.91635 7.15887,1.76132 15.0336,3.22907 23.98214,4.2565 4.47431,0.51373 9.21707,0.91739 14.27301,1.19257 2.52799,0.1376 5.13428,0.2431 7.82443,0.31418 0.67258,0.0178 1.35038,0.0334 2.03348,0.0468 l 0.29462,0.005"
id="path301"
style="fill-rule:evenodd;stroke:#1155cc;stroke-width:1;stroke-linecap:butt;stroke-linejoin:round" />
<path
d="m 389.76958,182.30679 4.55334,-1.60919 -4.52243,-1.69415 z"
id="path303"
style="fill-rule:evenodd;stroke:#1155cc;stroke-width:1;stroke-linecap:butt" />
<path
d="m 480.80315,232.13647 c 0,-19.00009 10,-32.37418 20,-38.00015 10,-5.62601 20,-3.50388 20,-7.00772"
id="path305"
style="fill:#000000;fill-opacity:0;fill-rule:evenodd" />
<path
d="m 480.80315,232.13647 c 0,-19.00009 10,-32.37418 20,-38.00015 5,-2.81299 10,-3.68897 13.75,-4.29965 l 0.82947,-0.13559"
id="path307"
style="fill-rule:evenodd;stroke:#b45f06;stroke-width:1;stroke-linecap:butt;stroke-linejoin:round" />
<path
d="m 516.09081,191.19329 3.3916,-3.43793 -4.80798,0.45352 z"
id="path309"
style="fill-rule:evenodd;stroke:#b45f06;stroke-width:1;stroke-linecap:butt" />
<path
d="m 553.78895,274.13515 c 0,-19.74805 0.50397,-29.62207 1.00787,-39.49606 0.50397,-9.87403 1.00788,-19.74805 1.00788,-39.49607"
id="path311"
style="fill:#000000;fill-opacity:0;fill-rule:evenodd" />
<path
d="m 553.78895,274.13515 c 0,-19.74805 0.50397,-29.62207 1.00787,-39.49606 0.50397,-9.87403 1.00788,-19.74805 1.00788,-39.49607"
id="path313"
style="fill-rule:evenodd;stroke:#38761d;stroke-width:1;stroke-linecap:butt;stroke-linejoin:round" />
<path
d="m 555.80945,167.13909 c 0,-15.00002 -0.32288,-22.50002 -0.64569,-30.00002 -0.32282,-7.5 -0.64569,-15 -0.64569,-30"
id="path315"
style="fill:#000000;fill-opacity:0;fill-rule:evenodd" />
<path
d="m 555.80945,167.13909 c 0,-15.00002 -0.32288,-22.50002 -0.64569,-30.00002 -0.16144,-3.75 -0.32282,-7.5 -0.44391,-12.1875 -0.0605,-2.34375 -0.11096,-4.92187 -0.14624,-7.85156 -0.009,-0.73242 -0.0167,-1.48681 -0.0236,-2.265 l -0.0123,-1.69597"
id="path317"
style="fill-rule:evenodd;stroke:#38761d;stroke-width:1;stroke-linecap:butt;stroke-linejoin:round" />
<path
d="m 556.18945,113.13364 -1.66656,-4.53267 -1.63685,4.54349 z"
id="path319"
style="fill-rule:evenodd;stroke:#38761d;stroke-width:1;stroke-linecap:butt" />
<path
d="m 247.6903,430.42779 v 0 c 0,-8.13208 32.24255,-14.72443 72.01575,-14.72443 v 0 c 39.77319,0 72.01575,6.59235 72.01575,14.72443 v 0 c 0,8.13208 -32.24255,14.72443 -72.01575,14.72443 v 0 c -39.77319,0 -72.01575,-6.59235 -72.01575,-14.72443 z"
id="path321"
style="fill:#d9d9d9;fill-rule:evenodd" />
<path
d="m 247.6903,430.42779 v 0 c 0,-8.13208 32.24255,-14.72443 72.01575,-14.72443 v 0 c 39.77319,0 72.01575,6.59235 72.01575,14.72443 v 0 c 0,8.13208 -32.24255,14.72443 -72.01575,14.72443 v 0 c -39.77319,0 -72.01575,-6.59235 -72.01575,-14.72443 z"
id="path323"
style="fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:round" />
<path
d="m 503.88469,428.26753 v 0 c 0,-7.64478 15.65271,-13.84204 34.96124,-13.84204 v 0 c 19.3086,0 34.96131,6.19726 34.96131,13.84204 v 0 c 0,7.64478 -15.65271,13.8421 -34.96131,13.8421 v 0 c -19.30853,0 -34.96124,-6.19732 -34.96124,-13.8421 z"
id="path327"
style="fill:#d9d9d9;fill-rule:evenodd" />
<path
d="m 503.88469,428.26753 v 0 c 0,-7.64478 15.65271,-13.84204 34.96124,-13.84204 v 0 c 19.3086,0 34.96131,6.19726 34.96131,13.84204 v 0 c 0,7.64478 -15.65271,13.8421 -34.96131,13.8421 v 0 c -19.30853,0 -34.96124,-6.19732 -34.96124,-13.8421 z"
id="path329"
style="fill-rule:evenodd;stroke:#ff0000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:round" />
<path
d="m 407.75415,414.42547 h 129.45172 v 27.68414 H 407.75415 Z"
id="path331"
style="fill:#d9d9d9;fill-rule:evenodd" />
<path
d="m 407.75415,414.42547 h 129.45172 v 27.68414 H 407.75415 Z"
id="path333"
style="fill-rule:evenodd;stroke:#ff0000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:round" />
<path
d="m 531.60381,417.33447 h 11.23364 v 21.79218 h -11.23364 z"
id="path337"
style="fill:#d9d9d9;fill-rule:evenodd" />
<path
d="m 142.27297,408.80313 h 100 V 450 h -100 z"
id="path339"
style="fill:#000000;fill-opacity:0;fill-rule:evenodd" />
<path
d="m 561.48035,403.13647 h 144.0315 v 41.19684 h -144.0315 z"
id="path343"
style="fill:#000000;fill-opacity:0;fill-rule:evenodd" />
<text
id="text4029"
y="29.335932"
x="245.80315"
style="font-style:normal;font-weight:normal;font-size:40px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none"
xml:space="preserve"><tspan
style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:18.66666603px;font-family:'Courier New';-inkscape-font-specification:'Courier New'"
y="29.335932"
x="245.80315"
id="tspan4027">drake::systems::LeafSystem</tspan></text>
<text
id="text4029-6"
y="399.2962"
x="398.78302"
style="font-style:normal;font-weight:normal;font-size:40px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none"
xml:space="preserve"><tspan
style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:18.66666603px;font-family:'Courier New';-inkscape-font-specification:'Courier New'"
y="399.2962"
x="398.78302"
id="tspan4027-2">drake::systems::LeafContext</tspan></text>
<text
id="text4221"
y="70.33593"
x="11.80315"
style="font-style:normal;font-weight:normal;font-size:40px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none"
xml:space="preserve"><tspan
style="font-size:18.66666603px"
y="70.33593"
x="11.80315"
id="tspan4219">Time (<tspan
id="tspan4223"
style="font-style:italic;font-size:18.66666603px">t</tspan>)</tspan></text>
<text
id="text4227"
y="106.33593"
x="4.8031502"
style="font-style:normal;font-weight:normal;font-size:40px;line-height:0.5;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none"
xml:space="preserve"><tspan
style="font-size:18.66666603px;fill:#0000ff"
id="tspan4229"
y="106.33593"
x="4.8031502">Input</tspan><tspan
id="tspan4243"
style="font-size:18.66666603px;fill:#0000ff"
y="127.13231"
x="4.8031502">ports (<tspan
id="tspan4231"
style="font-style:italic;font-size:18.66666603px;fill:#0000ff">u</tspan>)</tspan></text>
<flowRoot
transform="translate(-14.19685,-255.66407)"
style="font-style:normal;font-weight:normal;font-size:40px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none"
id="flowRoot4233"
xml:space="preserve"><flowRegion
id="flowRegion4235"><rect
y="290"
x="47"
height="59"
width="87"
id="rect4237" /></flowRegion><flowPara
id="flowPara4239" /></flowRoot> <text
id="text4248"
y="238.33594"
x="190.80315"
style="font-style:normal;font-weight:normal;font-size:16px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none"
xml:space="preserve"><tspan
y="238.33594"
x="190.80315"
id="tspan4246">Parameters (<tspan
id="tspan4250"
style="font-style:italic">p</tspan>)</tspan></text>
<text
id="text4248-5"
y="277.80469"
x="218.04924"
style="font-style:normal;font-weight:normal;font-size:16px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none"
xml:space="preserve"><tspan
y="277.80469"
x="218.04924"
id="tspan4246-5">State (<tspan
id="tspan4273"
style="font-style:italic">x</tspan>)</tspan></text>
<text
id="text4248-5-7"
y="435.80469"
x="166.80705"
style="font-style:normal;font-weight:normal;font-size:18.66666603px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none"
xml:space="preserve"><tspan
y="435.80469"
x="166.80705"
id="tspan4246-5-2">Values:</tspan></text>
<text
id="text4248-5-7-5"
y="437.28583"
x="283.6124"
style="font-style:normal;font-weight:normal;font-size:18.66666603px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none"
xml:space="preserve"><tspan
y="437.28583"
x="283.6124"
id="tspan4246-5-2-6">Source</tspan></text>
<text
id="text4248-5-7-3"
y="435.28583"
x="422.61243"
style="font-style:normal;font-weight:normal;font-size:18.66666603px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none"
xml:space="preserve"><tspan
y="435.28583"
x="422.61243"
id="tspan4246-5-2-5">Computed</tspan></text>
<text
id="text4248-5-7-7"
y="433.28583"
x="579.61243"
style="font-style:normal;font-weight:normal;font-size:16px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none"
xml:space="preserve"><tspan
y="433.28583"
x="579.61243"
id="tspan4246-5-2-59">(cache entry)</tspan></text>
<text
id="text4227-4"
y="176.84595"
x="743.30444"
style="font-style:normal;font-weight:normal;font-size:40px;line-height:0.5;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none"
xml:space="preserve"><tspan
style="font-size:18.66666603px;fill:#ff0000"
id="tspan4229-5"
y="176.84595"
x="743.30444">Output</tspan><tspan
id="tspan4243-4"
style="font-size:18.66666603px;fill:#ff0000"
y="197.64233"
x="743.30444">ports (y)</tspan></text>
<text
id="text4248-5-7-7-2"
y="75.285759"
x="740.61243"
style="font-style:italic;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:16px;line-height:125%;font-family:'Accanthis ADF Std No2';-inkscape-font-specification:'Accanthis ADF Std No2, Italic';text-align:start;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none"
xml:space="preserve"
sodipodi:linespacing="125%"><tspan
sodipodi:role="line"
id="tspan3772"
x="740.61243"
y="75.285759"
style="-inkscape-font-specification:'Amiri Quran, Normal';font-family:'Amiri Quran';font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;font-size:16.00000024px;text-anchor:start;text-align:start;writing-mode:lr;line-height:125%">derivatives,</tspan><tspan
sodipodi:role="line"
id="tspan3774"
x="740.61243"
y="95.285759"
style="-inkscape-font-specification:'Amiri Quran, Normal';font-family:'Amiri Quran';font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;font-size:16.00000024px;text-anchor:start;text-align:start;writing-mode:lr;line-height:125%">energy, etc.</tspan></text>
<text
id="text4248-5-7-7-5"
y="357.80469"
x="302.59222"
style="font-style:normal;font-weight:normal;font-size:16px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none"
xml:space="preserve"><tspan
y="357.80469"
x="302.59222"
id="tspan4246-5-2-59-1">Fixed inputs</tspan></text>
<text
xml:space="preserve"
style="font-style:normal;font-weight:normal;font-size:40px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
x="749.30133"
y="84.902794"
id="text3768"
sodipodi:linespacing="125%"><tspan
sodipodi:role="line"
id="tspan3770"
x="749.30133"
y="84.902794"></tspan></text>
</svg>
| 0 |
/home/johnshepherd/drake/systems | /home/johnshepherd/drake/systems/rendering/multibody_position_to_geometry_pose.h | #pragma once
#include <memory>
#include "drake/multibody/plant/multibody_plant.h"
#include "drake/systems/framework/leaf_system.h"
namespace drake {
namespace systems {
namespace rendering {
// TODO(SeanCurtis-TRI) Move this out of systems/rendering into a more
// reasonable location so we can delete the rendering directory. It is used
// by applications that use sliders to control MBP positions and then directly
// input that to SceneGraph as poses. geometry_inspector.py and multibody
// examples (jupyter widgets). manipulation/util seems to be a reasonable
// location.
/**
* A direct-feedthrough system that converts a vector of joint positions
* directly to a geometry::FramePoseVector<T> to behave like a
* MultibodyPlant::get_geometry_pose_output_port().
*
* @system
* name: MultibodyPositionToGeometryPose
* input_ports:
* - position
* output_ports:
* - geometry_pose
* @endsystem
*
* The position input must be a vector whose length matches either the
* number of positions in the MultibodyPlant or the number of states (based
* on the optional argument in the constructor). This option to pass the full
* state vector is provided only for convenience -- only the position values
* will affect the outputs.
*
* @tparam_double_only
* @ingroup visualization
*/
template <typename T>
class MultibodyPositionToGeometryPose final : public LeafSystem<T> {
public:
DRAKE_NO_COPY_NO_MOVE_NO_ASSIGN(MultibodyPositionToGeometryPose)
/**
* The %MultibodyPositionToGeometryPose holds an internal, non-owned
* reference to the MultibodyPlant object so you must ensure that @p plant
* has a longer lifetime than `this` %MultibodyPositionToGeometryPose system.
*
* @param input_multibody_state If true, the vector input port will be the
* size of the `plant` *state* vector. If false, it will be the size
* of the `plant` *position* vector. In both cases, only the
* positions will affect the output. @default false.
*
* @throws if `plant` is not finalized and registered with a SceneGraph.
*/
explicit MultibodyPositionToGeometryPose(
const multibody::MultibodyPlant<T>& plant,
bool input_multibody_state = false);
/**
* The %MultibodyPositionToGeometryPose owns its internal plant.
*
* @param input_multibody_state If true, the vector input port will be the
* size of the `plant` *state* vector. If false, it will be the size
* of the `plant` *position* vector. In both cases, only the
* positions will affect the output. @default: false.
*
* @throws if `owned_plant` is not finalized and registered with a SceneGraph.
*/
explicit MultibodyPositionToGeometryPose(
std::unique_ptr<multibody::MultibodyPlant<T>> owned_plant,
bool input_multibody_state = false);
~MultibodyPositionToGeometryPose() override = default;
const multibody::MultibodyPlant<T>& multibody_plant() const { return plant_; }
/** Returns true if this system owns its MultibodyPlant. */
bool owns_plant() const { return owned_plant_ != nullptr; }
private:
// Configure the input/output ports and prepare for calculation.
// @pre plant_ must reference a valid MBP.
void Configure(bool input_multibody_state);
void CalcGeometryPose(const Context<T>& context, AbstractValue* poses) const;
// NOTE: The constructor's correctness depends on these two members declared
// in this order (plant_ followed by owned_plant_). Do not change them.
const multibody::MultibodyPlant<T>& plant_;
// The optionally owned plant. If not null, owned_plant_ == &plant_ must be
// true.
const std::unique_ptr<multibody::MultibodyPlant<T>> owned_plant_;
// This is a context of the plant_ system, which is only owned here to avoid
// runtime allocation. It contains no relevant state.
mutable std::unique_ptr<Context<T>> plant_context_;
};
} // namespace rendering
} // namespace systems
} // namespace drake
| 0 |
/home/johnshepherd/drake/systems | /home/johnshepherd/drake/systems/rendering/BUILD.bazel | load("//tools/lint:lint.bzl", "add_lint_tests")
load(
"//tools/skylark:drake_cc.bzl",
"drake_cc_googletest",
"drake_cc_library",
"drake_cc_package_library",
)
package(default_visibility = ["//visibility:public"])
drake_cc_package_library(
name = "rendering",
visibility = ["//visibility:public"],
deps = [
":multibody_position_to_geometry_pose",
],
)
drake_cc_library(
name = "multibody_position_to_geometry_pose",
srcs = ["multibody_position_to_geometry_pose.cc"],
hdrs = ["multibody_position_to_geometry_pose.h"],
deps = [
"//common:pointer_cast",
"//geometry:kinematics_vector",
"//multibody/plant",
"//systems/framework:leaf_system",
],
)
drake_cc_googletest(
name = "multibody_position_to_geometry_pose_test",
data = ["@drake_models//:iiwa_description"],
deps = [
":multibody_position_to_geometry_pose",
"//common/test_utilities",
"//multibody/parsing",
"//systems/framework/test_utilities",
],
)
add_lint_tests(enable_clang_format_lint = False)
| 0 |
/home/johnshepherd/drake/systems | /home/johnshepherd/drake/systems/rendering/multibody_position_to_geometry_pose.cc | #include "drake/systems/rendering/multibody_position_to_geometry_pose.h"
#include <utility>
#include <vector>
#include "drake/common/drake_assert.h"
#include "drake/common/pointer_cast.h"
namespace drake {
namespace systems {
namespace rendering {
template <typename T>
MultibodyPositionToGeometryPose<T>::MultibodyPositionToGeometryPose(
const multibody::MultibodyPlant<T>& plant, bool input_multibody_state)
: plant_(plant) {
Configure(input_multibody_state);
}
// Note: This constructor is not *obviously* correct. Compare it with this code:
// unique_ptr<Foo> f;
// bar(*f, std::move(f));
// The invocation to bar may not be valid because the compiler can chose to
// perform the move of f before the dereference (which would make the first
// parameter a reference to null). However, this constructor works because:
// "... non-static data members are initialized in order of declaration in the
// class definition."
// https://en.cppreference.com/w/cpp/language/initializer_list#Initialization_order
template <typename T>
MultibodyPositionToGeometryPose<T>::MultibodyPositionToGeometryPose(
std::unique_ptr<multibody::MultibodyPlant<T>> owned_plant,
bool input_multibody_state)
: plant_(*owned_plant), owned_plant_(std::move(owned_plant)) {
DRAKE_DEMAND(owned_plant_ != nullptr);
Configure(input_multibody_state);
}
template <typename T>
void MultibodyPositionToGeometryPose<T>::Configure(bool input_multibody_state) {
// Either we don't own the plant, or we own the plant we're storing the
// reference for.
DRAKE_DEMAND(owned_plant_ == nullptr || owned_plant_.get() == &plant_);
if (!plant_.is_finalized()) {
throw std::logic_error(
"MultibodyPositionToGeometryPose requires a MultibodyPlant that has "
"been finalized");
}
if (!plant_.geometry_source_is_registered()) {
throw std::logic_error(
"MultibodyPositionToGeometryPose requires a MultibodyPlant that has "
"been registered with a SceneGraph");
}
plant_context_ = plant_.CreateDefaultContext();
this->DeclareInputPort("position", kVectorValued,
input_multibody_state ? plant_.num_multibody_states()
: plant_.num_positions());
this->DeclareAbstractOutputPort(
"geometry_pose",
[this]() {
return this->plant_.get_geometry_poses_output_port().Allocate();
},
[this](const Context<T>& context, AbstractValue* output) {
return this->CalcGeometryPose(context, output);
});
// Fix all input ports in the Context to avoid leaving them unassigned.
// They should not impact the output values.
plant_.AllocateFixedInputs(plant_context_.get());
}
template <typename T>
void MultibodyPositionToGeometryPose<T>::CalcGeometryPose(
const Context<T>& context, AbstractValue* output) const {
// Set the positions in the owned (mutable) context so that we can ask the
// MultibodyPlant to compute the outputs.
// TODO(eric.cousineau): Place `plant_context_` in the cache of `context`,
// and remove mutable member.
plant_.SetPositions(
plant_context_.get(),
this->get_input_port().Eval(context).head(plant_.num_positions()));
// Evaluate the plant's output port.
plant_.get_geometry_poses_output_port().Calc(*plant_context_, output);
}
template class MultibodyPositionToGeometryPose<double>;
} // namespace rendering
} // namespace systems
} // namespace drake
| 0 |
/home/johnshepherd/drake/systems/rendering | /home/johnshepherd/drake/systems/rendering/test/multibody_position_to_geometry_pose_test.cc | #include "drake/systems/rendering/multibody_position_to_geometry_pose.h"
#include <gtest/gtest.h>
#include "drake/common/test_utilities/eigen_matrix_compare.h"
#include "drake/common/test_utilities/expect_throws_message.h"
#include "drake/multibody/parsing/parser.h"
#include "drake/multibody/plant/multibody_plant.h"
namespace drake {
namespace systems {
namespace rendering {
namespace {
using geometry::SceneGraph;
using multibody::BodyIndex;
using multibody::MultibodyPlant;
using multibody::Parser;
using std::make_unique;
GTEST_TEST(MultibodyPositionToGeometryPoseTest, BadConstruction) {
{
MultibodyPlant<double> mbp(0.0);
mbp.Finalize();
DRAKE_EXPECT_THROWS_MESSAGE(
MultibodyPositionToGeometryPose<double>{mbp},
"MultibodyPositionToGeometryPose requires a MultibodyPlant that has "
"been registered with a SceneGraph");
}
{
MultibodyPlant<double> mbp(0.0);
SceneGraph<double> scene_graph;
mbp.RegisterAsSourceForSceneGraph(&scene_graph);
Parser(&mbp).AddModelsFromUrl(
"package://drake_models/iiwa_description/sdf/iiwa7_no_collision.sdf");
DRAKE_EXPECT_THROWS_MESSAGE(MultibodyPositionToGeometryPose<double>{mbp},
"MultibodyPositionToGeometryPose requires a "
"MultibodyPlant that has been finalized");
}
}
GTEST_TEST(MultibodyPositionToGeometryPoseTest, Ownership) {
auto mbp = make_unique<MultibodyPlant<double>>(0.0);
auto raw_ptr = mbp.get();
SceneGraph<double> scene_graph;
mbp->RegisterAsSourceForSceneGraph(&scene_graph);
Parser(mbp.get()).AddModelsFromUrl(
"package://drake_models/iiwa_description/sdf/iiwa7_no_collision.sdf");
mbp->Finalize();
const MultibodyPositionToGeometryPose<double> dut(std::move(mbp));
EXPECT_EQ(dut.get_input_port().size(),
dut.multibody_plant().num_positions());
EXPECT_EQ(&dut.multibody_plant(), raw_ptr);
EXPECT_TRUE(dut.owns_plant());
auto context = dut.CreateDefaultContext();
const Eigen::VectorXd position =
Eigen::VectorXd::LinSpaced(raw_ptr->num_positions(), 0.123, 0.456);
dut.get_input_port().FixValue(context.get(), position);
const auto& output =
dut.get_output_port().Eval<geometry::FramePoseVector<double>>(*context);
for (BodyIndex i(0); i < raw_ptr->num_bodies(); ++i) {
if (i == raw_ptr->world_body().index()) {
// The world geometry will not appear in the poses.
continue;
}
const std::optional<geometry::FrameId> id =
raw_ptr->GetBodyFrameIdIfExists(i);
EXPECT_TRUE(id.has_value());
EXPECT_TRUE(output.has_id(id.value()));
}
EXPECT_EQ(output.size(), raw_ptr->num_bodies() - 1);
}
GTEST_TEST(MultibodyPositionToGeometryPoseTest, InputOutput) {
MultibodyPlant<double> mbp(0.0);
SceneGraph<double> scene_graph;
mbp.RegisterAsSourceForSceneGraph(&scene_graph);
Parser(&mbp).AddModelsFromUrl(
"package://drake_models/iiwa_description/sdf/iiwa7_no_collision.sdf");
mbp.Finalize();
const MultibodyPositionToGeometryPose<double> dut(mbp);
EXPECT_FALSE(dut.owns_plant());
EXPECT_EQ(dut.get_input_port().size(), mbp.num_positions());
EXPECT_EQ(dut.get_input_port().get_index(), 0);
EXPECT_EQ(dut.get_output_port().get_index(), 0);
EXPECT_TRUE(dut.HasAnyDirectFeedthrough());
auto context = dut.CreateDefaultContext();
const Eigen::VectorXd position =
Eigen::VectorXd::LinSpaced(mbp.num_positions(), 0.123, 0.456);
dut.get_input_port().FixValue(context.get(), position);
const auto& output =
dut.get_output_port().Eval<geometry::FramePoseVector<double>>(*context);
for (BodyIndex i(0); i < mbp.num_bodies(); ++i) {
if (i == mbp.world_body().index()) {
// The world geometry will not appear in the poses.
continue;
}
const std::optional<geometry::FrameId> id = mbp.GetBodyFrameIdIfExists(i);
EXPECT_TRUE(id.has_value());
EXPECT_TRUE(output.has_id(id.value()));
}
EXPECT_EQ(output.size(), mbp.num_bodies() - 1);
}
// Confirm that we can pass in the larger state vector and it does not
// affect our results.
GTEST_TEST(MultibodyPositionToGeometryPoseTest, FullStateInput) {
auto mbp = make_unique<MultibodyPlant<double>>(0.0);
SceneGraph<double> scene_graph;
mbp->RegisterAsSourceForSceneGraph(&scene_graph);
Parser(mbp.get()).AddModelsFromUrl(
"package://drake_models/iiwa_description/sdf/iiwa7_no_collision.sdf");
mbp->Finalize();
const Eigen::VectorXd state =
Eigen::VectorXd::LinSpaced(mbp->num_multibody_states(), 0.123, 0.456);
const MultibodyPositionToGeometryPose<double> position_sys(*mbp, false);
EXPECT_EQ(position_sys.get_input_port().size(), mbp->num_positions());
auto position_context = position_sys.CreateDefaultContext();
position_sys.get_input_port().FixValue(position_context.get(),
state.head(mbp->num_positions()));
const auto& position_output =
position_sys.get_output_port().Eval<geometry::FramePoseVector<double>>(
*position_context);
const MultibodyPositionToGeometryPose<double> state_sys(*mbp, true);
EXPECT_EQ(state_sys.get_input_port().size(), mbp->num_multibody_states());
auto state_context = state_sys.CreateDefaultContext();
state_sys.get_input_port().FixValue(state_context.get(), state);
const auto& state_output =
state_sys.get_output_port().Eval<geometry::FramePoseVector<double>>(
*state_context);
EXPECT_EQ(position_output.size(), state_output.size());
for (const auto& id : position_output.ids()) {
EXPECT_TRUE(
position_output.value(id).IsExactlyEqualTo(state_output.value(id)));
}
// Test the ownership constructor also has the right size.
const MultibodyPositionToGeometryPose<double> owned_sys(std::move(mbp), true);
EXPECT_EQ(owned_sys.get_input_port().size(),
owned_sys.multibody_plant().num_multibody_states());
}
} // namespace
} // namespace rendering
} // namespace systems
} // namespace drake
| 0 |
/home/johnshepherd/drake/systems | /home/johnshepherd/drake/systems/analysis/region_of_attraction.cc | #include "drake/systems/analysis/region_of_attraction.h"
#include <algorithm>
#include "drake/math/continuous_lyapunov_equation.h"
#include "drake/math/matrix_util.h"
#include "drake/math/quadratic_form.h"
#include "drake/solvers/choose_best_solver.h"
#include "drake/solvers/mathematical_program.h"
#include "drake/solvers/solve.h"
#include "drake/systems/primitives/linear_system.h"
namespace drake {
namespace systems {
namespace analysis {
using Eigen::MatrixXd;
using math::IsPositiveDefinite;
using solvers::MathematicalProgram;
using solvers::Solve;
using symbolic::Environment;
using symbolic::Expression;
using symbolic::Polynomial;
using symbolic::Substitution;
using symbolic::Variable;
using symbolic::Variables;
namespace {
// Assumes V positive semi-definite at the origin.
// If the Hessian of Vdot is negative definite at the origin, then we use
// Vdot = 0 => V >= rho (or x=0) via
// maximize rho
// subject to (V-rho)*(x'*x)^d - Lambda*Vdot is SOS.
// If we cannot confirm negative definiteness, then we must ask instead for
// Vdot >=0 => V >= rho (or x=0).
Expression FixedLyapunovConvex(
const solvers::VectorXIndeterminate& x, const Expression& V,
const Expression& Vdot, const std::optional<solvers::SolverId>& solver_id,
const std::optional<solvers::SolverOptions>& solver_options) {
// Check if the Hessian of Vdot is negative definite.
Environment env;
for (int i = 0; i < x.size(); i++) {
env.insert(x(i), 0.0);
}
const Eigen::MatrixXd P =
symbolic::Evaluate(symbolic::Jacobian(Vdot.Jacobian(x), x), env);
const double tolerance = 1e-8;
bool Vdot_is_locally_negative_definite = IsPositiveDefinite(-P, tolerance);
Polynomial V_balanced, Vdot_balanced;
if (Vdot_is_locally_negative_definite) {
// Then "balance" V and Vdot.
const Eigen::MatrixXd S =
symbolic::Evaluate(symbolic::Jacobian(V.Jacobian(x), x), env);
const Eigen::MatrixXd T = math::BalanceQuadraticForms(S, -P);
const VectorX<Expression> Tx = T * x;
Substitution subs;
for (int i = 0; i < static_cast<int>(x.size()); i++) {
subs.emplace(x(i), Tx(i));
}
V_balanced = Polynomial(V.Substitute(subs));
Vdot_balanced = Polynomial(Vdot.Substitute(subs));
} else {
V_balanced = Polynomial(V);
Vdot_balanced = Polynomial(Vdot);
}
MathematicalProgram prog;
prog.AddIndeterminates(x);
const int V_degree = V_balanced.TotalDegree();
const int Vdot_degree = Vdot_balanced.TotalDegree();
// TODO(russt): Add this as an option once I have an example that needs it.
// This is a reasonable guess: we want the multiplier to be able to compete
// with terms in Vdot, and to be even (since it may be SOS below).
const int lambda_degree = std::ceil(Vdot_degree / 2.0) * 2;
const auto lambda = prog.NewFreePolynomial(Variables(x), lambda_degree);
const auto rho = prog.NewContinuousVariables<1>("rho")[0];
// Want (V-rho)(x'x)^d and Lambda*Vdot to be the same degree.
const int d = std::floor((lambda_degree + Vdot_degree - V_degree) / 2);
prog.AddSosConstraint(
((V_balanced - rho) * Polynomial(pow((x.transpose() * x)[0], d)) -
lambda * Vdot_balanced));
// If Vdot is indefinite, then the linearization does not inform us about the
// local stability. Add "lambda(x) is SOS" to confirm this local stability.
if (!Vdot_is_locally_negative_definite) {
prog.AddSosConstraint(lambda);
}
prog.AddCost(-rho);
solvers::MathematicalProgramResult result;
if (solver_id.has_value()) {
const auto solver = solvers::MakeSolver(solver_id.value());
solver->Solve(prog, std::nullopt, solver_options, &result);
} else {
result = Solve(prog, std::nullopt, solver_options);
}
DRAKE_THROW_UNLESS(result.is_success());
DRAKE_THROW_UNLESS(result.GetSolution(rho) > 0.0);
return V / result.GetSolution(rho);
}
// Variant of FixedLyapunovConvex which takes Vdot(x,xdot), and certifies the
// condition on the variety defined by the residuals g(x,xdot)=0.
// Vdot(x,z) = 0, g(x,z)=0 => V(x) >= rho (or x=0) via
// maximize rho
// subject to (V-rho)*(x'*x)^d - Lambda*Vdot + Lambda_g*g is SOS.
// If we cannot confirm negative definiteness, then we must ask instead for
// Vdot(x,z) >=0, g(x,z)=0 => V >= rho (or x=0).
Expression FixedLyapunovConvexImplicit(
const solvers::VectorXIndeterminate& x,
const solvers::VectorXIndeterminate& xdot, const Expression& V,
const Expression& Vdot, const VectorX<Expression>& g) {
// Check if the Hessian of Vdot is negative definite on the tangent space.
// Given Vdot(x,z) and g(x,z)=0, we wish to test whether yᵀQy ≤ 0 for all
// y where Gy=0, where y=[x,z], P = Hessian(Vdot,y), and G=dgdy. To do this,
// we find N as an orthonormal basis for the nullspace of G, and confirm that
// NᵀPN is negative definite.
Environment env;
for (int i = 0; i < x.size(); i++) {
env.insert(x(i), 0.0);
}
for (int i = 0; i < xdot.size(); i++) {
env.insert(xdot(i), 0.0);
}
solvers::VectorXIndeterminate y(x.size() + xdot.size());
y << x, xdot;
const Eigen::MatrixXd P =
symbolic::Evaluate(symbolic::Jacobian(Vdot.Jacobian(y), y), env);
const Eigen::MatrixXd G = symbolic::Evaluate(symbolic::Jacobian(g, y), env);
Eigen::FullPivLU<MatrixXd> lu(G);
MatrixXd N = lu.kernel();
const double tolerance = 1e-8;
bool Vdot_is_locally_negative_definite =
IsPositiveDefinite(-N.transpose() * P * N, tolerance);
Polynomial V_poly(V);
Polynomial Vdot_poly(Vdot);
// TODO(russt): implement balancing.
MathematicalProgram prog;
prog.AddIndeterminates(x);
prog.AddIndeterminates(xdot);
const int V_degree = V_poly.TotalDegree();
const int Vdot_degree = Vdot_poly.TotalDegree();
// TODO(russt): Add this as an option once I have an example that needs it.
// This is a reasonable guess: we want the multiplier to be able to compete
// with terms in Vdot, and to be even (since it may be SOS below).
const int lambda_degree = std::ceil(Vdot_degree / 2.0) * 2;
const Polynomial lambda = prog.NewFreePolynomial(Variables(y), lambda_degree);
VectorX<Polynomial> lambda_g(g.size());
VectorX<Polynomial> g_poly(g.size());
for (int i = 0; i < g.size(); ++i) {
// Want λ_g[i] * g[i] to have the same degree as λ * Vdot.
const int lambda_gi_degree = std::max(
lambda_degree + Vdot_degree - Polynomial(g[0]).TotalDegree(), 0);
lambda_g[i] = prog.NewFreePolynomial(Variables(y), lambda_gi_degree);
g_poly[i] = Polynomial(g[i]);
}
const auto rho = prog.NewContinuousVariables<1>("rho")[0];
// Want (V-rho)(x'x)^d and Lambda*Vdot to be the same degree.
const int d = std::floor((lambda_degree + Vdot_degree - V_degree) / 2);
prog.AddSosConstraint(
((V_poly - rho) * Polynomial(pow((x.transpose() * x)[0], d)) -
lambda * Vdot_poly + lambda_g.dot(g_poly)));
// If Vdot is indefinite, then the linearization does not inform us about the
// local stability. Add "lambda(x) is SOS" to confirm this local stability.
if (!Vdot_is_locally_negative_definite) {
prog.AddSosConstraint(lambda);
}
prog.AddCost(-rho);
const auto result = Solve(prog);
DRAKE_THROW_UNLESS(result.is_success());
DRAKE_THROW_UNLESS(result.GetSolution(rho) > 0.0);
return V / result.GetSolution(rho);
}
} // namespace
Expression RegionOfAttraction(const System<double>& system,
const Context<double>& context,
const RegionOfAttractionOptions& options) {
system.ValidateContext(context);
DRAKE_THROW_UNLESS(context.has_only_continuous_state());
const int num_states = context.num_continuous_states();
VectorX<double> x0 = context.get_continuous_state_vector().CopyToVector();
// Check that x0 is a fixed point.
VectorX<double> xdot0 =
system.EvalTimeDerivatives(context).get_vector().CopyToVector();
DRAKE_THROW_UNLESS(xdot0.template lpNorm<Eigen::Infinity>() <= 1e-14);
const auto symbolic_system = system.ToSymbolic();
const auto symbolic_context = symbolic_system->CreateDefaultContext();
symbolic_context->SetTimeStateAndParametersFrom(context);
symbolic_system->FixInputPortsFrom(system, context, symbolic_context.get());
// Subroutines should create their own programs to avoid incidental
// sharing of costs or constraints. However, we pass x and expect that
// sub-programs will use AddIndeterminates(x).
MathematicalProgram prog;
// Define the relative coordinates: x_bar = x - x0
const auto x_bar = prog.NewIndeterminates(num_states, "x");
Expression V;
bool user_provided_lyapunov_candidate =
!options.lyapunov_candidate.EqualTo(Expression::Zero());
if (user_provided_lyapunov_candidate) {
DRAKE_THROW_UNLESS(options.lyapunov_candidate.is_polynomial());
V = options.lyapunov_candidate;
Substitution subs;
subs.reserve(num_states);
// If necessary, replace the state variables.
if (options.state_variables.rows() > 0) {
for (int i = 0; i < num_states; i++) {
subs.emplace(options.state_variables(i), x0(i) + x_bar(i));
}
} else { // just change to relative coordinates.
for (int i = 0; i < num_states; i++) {
subs.emplace(x_bar(i), x0(i) + x_bar(i));
}
}
V = V.Substitute(subs);
// Check that V has the right Variables.
DRAKE_THROW_UNLESS(V.GetVariables().IsSubsetOf(Variables(x_bar)));
// Check that V is positive definite.
prog.AddSosConstraint(V);
solvers::MathematicalProgramResult result;
if (options.solver_id.has_value()) {
auto solver = solvers::MakeSolver(options.solver_id.value());
solver->Solve(prog, std::nullopt, options.solver_options, &result);
} else {
result = Solve(prog, std::nullopt, options.solver_options);
}
DRAKE_THROW_UNLESS(result.is_success());
} else {
// Solve a Lyapunov equation to find a candidate.
const auto linearized_system =
Linearize(system, context, InputPortSelection::kNoInput,
OutputPortSelection::kNoOutput);
const Eigen::MatrixXd Q = Eigen::MatrixXd::Identity(num_states, num_states);
const Eigen::MatrixXd P =
math::RealContinuousLyapunovEquation(linearized_system->A(), Q);
V = x_bar.dot(P * x_bar);
}
// Evaluate the dynamics (in relative coordinates).
symbolic_context->SetContinuousState(x0 + x_bar);
if (options.use_implicit_dynamics) {
const auto derivatives = symbolic_system->AllocateTimeDerivatives();
const solvers::VectorXIndeterminate xdot =
prog.NewIndeterminates(derivatives->size(), "xdot");
const Expression Vdot = V.Jacobian(x_bar).dot(xdot);
derivatives->SetFromVector(xdot.cast<Expression>());
VectorX<Expression> g(
symbolic_system->implicit_time_derivatives_residual_size());
symbolic_system->CalcImplicitTimeDerivativesResidual(*symbolic_context,
*derivatives, &g);
V = FixedLyapunovConvexImplicit(x_bar, xdot, V, Vdot, g);
} else {
const VectorX<Expression> f =
symbolic_system->EvalTimeDerivatives(*symbolic_context)
.get_vector()
.CopyToVector();
const Expression Vdot = V.Jacobian(x_bar).dot(f);
V = FixedLyapunovConvex(x_bar, V, Vdot, options.solver_id,
options.solver_options);
}
// Put V back into global coordinates.
Substitution subs;
subs.reserve(num_states);
if (options.state_variables.rows() > 0) {
for (int i = 0; i < num_states; i++) {
subs.emplace(x_bar(i), options.state_variables(i) - x0(i));
}
} else {
for (int i = 0; i < num_states; i++) {
subs.emplace(x_bar(i), x_bar(i) - x0(i));
}
}
V = V.Substitute(subs);
return V;
}
} // namespace analysis
} // namespace systems
} // namespace drake
| 0 |
/home/johnshepherd/drake/systems | /home/johnshepherd/drake/systems/analysis/hermitian_dense_output.h | #pragma once
#include <algorithm>
#include <limits>
#include <stdexcept>
#include <utility>
#include <vector>
#include "drake/common/default_scalars.h"
#include "drake/common/drake_bool.h"
#include "drake/common/drake_copyable.h"
#include "drake/common/eigen_types.h"
#include "drake/common/extract_double.h"
#include "drake/common/trajectories/piecewise_polynomial.h"
#include "drake/systems/analysis/stepwise_dense_output.h"
namespace drake {
namespace systems {
namespace internal {
/// Converts an STL vector of scalar type `S` elements to an STL vector
/// of double type elements, failing at runtime if the type cannot be
/// converted.
/// @see ExtractDoubleOrThrow(const T&)
/// @tparam S A valid Eigen scalar type.
template <typename S>
std::vector<double> ExtractDoublesOrThrow(const std::vector<S>& input_vector) {
  std::vector<double> output_vector;
  // Reserve up front so the conversion performs a single allocation.
  output_vector.reserve(input_vector.size());
  for (const S& element : input_vector) {
    output_vector.push_back(ExtractDoubleOrThrow(element));
  }
  return output_vector;
}
/// Converts an STL vector of matrices with scalar type `S` elements to an STL
/// vector of matrices with double type elements, failing at runtime if the type
/// cannot be converted.
/// @see ExtractDoublesOrThrow(const MatrixX<T>&)
/// @tparam S A valid Eigen scalar type.
template <typename S>
std::vector<MatrixX<double>>
ExtractDoublesOrThrow(const std::vector<MatrixX<S>>& input_vector) {
  std::vector<MatrixX<double>> output_vector;
  // Reserve up front so the conversion performs a single allocation of the
  // outer vector (each converted matrix still allocates its own storage).
  output_vector.reserve(input_vector.size());
  for (const MatrixX<S>& matrix : input_vector) {
    output_vector.push_back(ExtractDoubleOrThrow(matrix));
  }
  return output_vector;
}
} // namespace internal
/// A StepwiseDenseOutput class implementation using Hermitian interpolators,
/// and therefore a _continuous extension_ of the solution 𝐱(t) (see
/// [Engquist, 2015]). This concept can be recast as a type of dense output that
/// is continuous.
///
/// Updates take the form of integration steps, for which state 𝐱 and state time
/// derivative d𝐱/dt are known at least at both ends of the step. Hermite cubic
/// polynomials are then constructed upon @ref StepwiseDenseOutput::Consolidate
/// "consolidation", yielding a C1 extension of the solution 𝐱(t).
///
/// Hermitian continuous extensions exhibit the same truncation error as that
/// of the integration scheme being used for up to 3rd order schemes (see
/// [Hairer, 1993]).
///
/// From a performance standpoint, memory footprint and evaluation overhead
/// (i.e. the computational cost of an evaluation) increase linearly and
/// logarithmically with the amount of steps taken, respectively.
///
/// - [Engquist, 2015] B. Engquist. Encyclopedia of Applied and Computational
/// Mathematics, p. 339, Springer, 2015.
/// - [Hairer, 1993] E. Hairer, S. Nørsett and G. Wanner. Solving Ordinary
/// Differential Equations I (Nonstiff Problems), p.190,
/// Springer, 1993.
/// @tparam_default_scalar
template <typename T>
class HermitianDenseOutput final : public StepwiseDenseOutput<T> {
 public:
  DRAKE_NO_COPY_NO_MOVE_NO_ASSIGN(HermitianDenseOutput)

  /// An integration step representation class, holding just enough
  /// for Hermitian interpolation: three (3) related sets containing
  /// step times {t₀, ..., tᵢ₋₁, tᵢ} where tᵢ ∈ ℝ, step states
  /// {𝐱₀, ..., 𝐱ᵢ₋₁, 𝐱ᵢ} where 𝐱ᵢ ∈ ℝⁿ, and state derivatives
  /// {d𝐱/dt₀, ..., d𝐱/dtᵢ₋₁, d𝐱/dtᵢ} where d𝐱/dtᵢ ∈ ℝⁿ.
  ///
  /// This step definition allows for intermediate time, state and state
  /// derivative triplets (e.g. the integrator internal stages) to improve
  /// interpolation.
  ///
  /// @note The use of column matrices instead of plain vectors helps reduce
  /// HermitianDenseOutput construction overhead, as this type of dense
  /// output leverages a PiecewisePolynomial instance that takes matrices.
  class IntegrationStep {
   public:
    DRAKE_DEFAULT_COPY_AND_MOVE_AND_ASSIGN(IntegrationStep)

    /// Constructs an empty step.
    IntegrationStep() = default;

    /// Constructs a zero length step (i.e. a step containing a single time,
    /// state and state derivative triplet) from column matrices.
    ///
    /// @param initial_time Initial time t₀ where the step starts.
    /// @param initial_state Initial state vector 𝐱₀ at @p initial_time
    ///                      as a column matrix.
    /// @param initial_state_derivative Initial state derivative vector
    ///                                 d𝐱/dt₀ at @p initial_time as a
    ///                                 column matrix.
    /// @throws std::exception
    ///   if given @p initial_state 𝐱₀ is not a column matrix.<br>
    ///   if given @p initial_state_derivative d𝐱/dt₀ is not a column
    ///   matrix.<br>
    ///   if given @p initial_state 𝐱₀ and @p initial_state_derivative
    ///   d𝐱/dt₀ do not match each other's dimension.
    IntegrationStep(const T& initial_time, MatrixX<T> initial_state,
                    MatrixX<T> initial_state_derivative) {
      ValidateStepExtendTripletOrThrow(initial_time, initial_state,
                                       initial_state_derivative);
      times_.push_back(initial_time);
      states_.push_back(std::move(initial_state));
      state_derivatives_.push_back(std::move(initial_state_derivative));
    }

    /// Extends the step forward in time from column matrices.
    ///
    /// Provided @p time, @p state and @p state_derivative are appended
    /// to the current step, effectively increasing its time length.
    ///
    /// @param time Time tᵢ to extend the step to.
    /// @param state State vector 𝐱ᵢ at @p time tᵢ as a column matrix.
    /// @param state_derivative State derivative vector d𝐱/dtᵢ at @p time tᵢ
    ///                         as a column matrix.
    /// @throws std::exception
    ///   if given @p state 𝐱ᵢ is not a column matrix.<br>
    ///   if given @p state_derivative d𝐱/dtᵢ is not a column matrix.<br>
    ///   if given @p time tᵢ is not greater than the previous time
    ///   tᵢ₋₁ in the step.<br>
    ///   if given @p state 𝐱ᵢ dimension does not match the dimension of
    ///   the previous state 𝐱ᵢ₋₁.<br>
    ///   if given @p state 𝐱ᵢ and @p state_derivative d𝐱/dtᵢ do not
    ///   match each other's dimension.
    void Extend(const T& time, MatrixX<T> state, MatrixX<T> state_derivative) {
      ValidateStepExtendTripletOrThrow(time, state, state_derivative);
      times_.push_back(time);
      states_.push_back(std::move(state));
      state_derivatives_.push_back(std::move(state_derivative));
    }

    /// Returns step start time t₀ (that of the first time, state and state
    /// derivative triplet), which may coincide with its end time tᵢ (that of
    /// the last time, state and state derivative triplet) if the step has zero
    /// length (that is, it contains a single triplet).
    const T& start_time() const { return times_.front(); }

    /// Returns step end time tᵢ (that of the last time, state and state
    /// derivative triplet), which may coincide with its start time t₀ (that of
    /// the first time, state and state derivative triplet) if the step has zero
    /// length (that is, it contains a single triplet).
    const T& end_time() const { return times_.back(); }

    /// Returns the step state 𝐱 size (i.e. dimension).
    /// @note NOTE(review): reads `states_.back()` and thus assumes the step
    /// holds at least one triplet — TODO confirm no caller queries an
    /// empty (default-constructed) step.
    int size() const {
      return states_.back().rows();
    }

    /// Returns step times {t₀, ..., tᵢ₋₁, tᵢ}.
    const std::vector<T>& get_times() const { return times_; }

    /// Returns step states {𝐱₀, ..., 𝐱ᵢ₋₁, 𝐱ᵢ} as column matrices.
    const std::vector<MatrixX<T>>& get_states() const { return states_; }

    /// Gets step state derivatives {d𝐱/dt₀, ..., d𝐱/dtᵢ₋₁, d𝐱/dtᵢ}
    /// as column matrices.
    const std::vector<MatrixX<T>>& get_state_derivatives() const {
      return state_derivatives_;
    }

   private:
    // Validates step update triplet for consistency between the triplet
    // and with current step content.
    //
    // @see Extend(const T&, MatrixX<T>, MatrixX<T>)
    void ValidateStepExtendTripletOrThrow(
        const T& time, const MatrixX<T>& state,
        const MatrixX<T>& state_derivative) {
      if (state.cols() != 1) {
        throw std::runtime_error("Provided state for step is "
                                 "not a column matrix.");
      }
      if (state_derivative.cols() != 1) {
        throw std::runtime_error("Provided state derivative for "
                                 " step is not a column matrix.");
      }
      // Times must be strictly increasing; both checks below only apply once
      // the step already holds at least one triplet.
      if (!times_.empty()) {
        if (time < times_.front()) {
          throw std::runtime_error("Step cannot be extended"
                                   " backwards in time.");
        }
        if (time <= times_.back()) {
          throw std::runtime_error("Step already extends up"
                                   " to the given time.");
        }
      }
      if (!states_.empty() && states_.back().rows() != state.rows()) {
        throw std::runtime_error("Provided state dimensions do not "
                                 "match that of the states in the step.");
      }
      if (state.rows() != state_derivative.rows()) {
        throw std::runtime_error("Provided state and state derivative "
                                 "dimensions do not match.");
      }
    }
    // Step times, ordered in increasing order.
    std::vector<T> times_{};
    // Step states, ordered as to match its corresponding time in `times_`.
    std::vector<MatrixX<T>> states_{};
    // Step state derivatives, ordered as to match its corresponding
    // time in `times_`.
    std::vector<MatrixX<T>> state_derivatives_{};
  };

  HermitianDenseOutput() = default;

  /// Initialize the DenseOutput with an existing trajectory.
  /// For T = double the trajectory is stored directly; for other scalar
  /// types each polynomial segment is converted to double coefficients via
  /// ExtractDoubleOrThrow (which throws if a coefficient cannot be
  /// converted).
  explicit HermitianDenseOutput(
      const trajectories::PiecewisePolynomial<T>& trajectory)
      : start_time_(trajectory.start_time()),
        end_time_(trajectory.end_time()) {
    if constexpr (std::is_same_v<T, double>) {
      continuous_trajectory_ = trajectory;
      return;
    }
    // Create continuous_trajectory_ by converting all the segments to double.
    using trajectories::PiecewisePolynomial;
    const std::vector<T>& breaks = trajectory.get_segment_times();
    for (int i = 0; i < trajectory.get_number_of_segments(); i++) {
      const typename PiecewisePolynomial<T>::PolynomialMatrix& poly =
          trajectory.getPolynomialMatrix(i);
      // Convert each entry's coefficient vector to double, element-wise.
      MatrixX<Polynomiald> polyd = poly.unaryExpr([](const Polynomial<T>& p) {
        return Polynomiald(
            ExtractDoubleOrThrow(p.GetCoefficients()));
      });
      // Append this segment, spanning [breaks[i], breaks[i + 1]].
      continuous_trajectory_.ConcatenateInTime(
          PiecewisePolynomial<double>({polyd},
                                      {ExtractDoubleOrThrow(breaks[i]),
                                       ExtractDoubleOrThrow(breaks[i + 1])}));
    }
  }

  /// Update output with the given @p step.
  ///
  /// Provided @p step is queued for later consolidation. Note that
  /// the time the @p step extends cannot be readily evaluated (see
  /// StepwiseDenseOutput class documentation).
  ///
  /// @param step Integration step to update this output with.
  /// @throws std::exception
  ///   if given @p step has zero length.<br>
  ///   if given @p step does not ensure C1 continuity at the end of
  ///   this dense output.<br>
  ///   if given @p step dimensions does not match this dense output
  ///   dimensions.
  void Update(IntegrationStep step) {
    ValidateStepCanBeConsolidatedOrThrow(step);
    raw_steps_.push_back(std::move(step));
  }

  // Discards the most recently queued (not yet consolidated) step.
  void Rollback() override {
    if (raw_steps_.empty()) {
      throw std::logic_error("No updates to rollback.");
    }
    raw_steps_.pop_back();
  }

  // Converts every queued step into a cubic Hermite segment appended to the
  // underlying piecewise polynomial, then clears the queue. The last step is
  // kept so that future updates can be validated for continuity against it.
  void Consolidate() override {
    if (raw_steps_.empty()) {
      throw std::logic_error("No updates to consolidate.");
    }
    for (const IntegrationStep& step : raw_steps_) {
      continuous_trajectory_.ConcatenateInTime(
          trajectories::PiecewisePolynomial<double>::CubicHermite(
              internal::ExtractDoublesOrThrow(step.get_times()),
              internal::ExtractDoublesOrThrow(step.get_states()),
              internal::ExtractDoublesOrThrow(step.get_state_derivatives())));
    }
    start_time_ = continuous_trajectory_.start_time();
    end_time_ = continuous_trajectory_.end_time();
    last_consolidated_step_ = std::move(raw_steps_.back());
    raw_steps_.clear();
  }

 protected:
  // Evaluates the consolidated trajectory at time t, returning the full
  // state vector (the trajectory's single column).
  VectorX<T> DoEvaluate(const T& t) const override {
    const MatrixX<double> matrix_value =
        continuous_trajectory_.value(ExtractDoubleOrThrow(t));
    return matrix_value.col(0).cast<T>();
  }

  // Evaluates the n-th state coordinate of the consolidated trajectory at
  // time t.
  T DoEvaluateNth(const T& t, const int n) const override {
    return continuous_trajectory_.scalarValue(
        ExtractDoubleOrThrow(t), n, 0);
  }

  bool do_is_empty() const override {
    return continuous_trajectory_.empty();
  }

  int do_size() const override {
    return continuous_trajectory_.rows();
  }
  const T& do_end_time() const override { return end_time_; }
  const T& do_start_time() const override { return start_time_; }

 private:
  // Validates that the provided @p step can be consolidated into this
  // dense output.
  // @see Update(const IntegrationStep&)
  void ValidateStepCanBeConsolidatedOrThrow(const IntegrationStep& step) {
    if (step.start_time() == step.end_time()) {
      throw std::runtime_error("Provided step has zero length "
                               "i.e. start time and end time "
                               "are equal.");
    }
    // Check continuity against the most recent queued step if any, otherwise
    // against the last step already consolidated into the trajectory.
    if (!raw_steps_.empty()) {
      EnsureOutputConsistencyOrThrow(step, raw_steps_.back());
    } else if (!continuous_trajectory_.empty()) {
      EnsureOutputConsistencyOrThrow(step, last_consolidated_step_);
    }
  }

  // Ensures that this dense output would remain consistent if the
  // provided @p step were to be consolidated at its end.
  // @param next_step Integration step to be taken.
  // @param prev_step Last integration step consolidated or to be
  //                  consolidated into dense output.
  // @throws std::exception
  //   if given @p next_step does not ensure C1 continuity at the
  //   end of the given @p prev_step.<br>
  //   if given @p next_step dimensions does not match @p prev_step
  //   dimensions.
  static void EnsureOutputConsistencyOrThrow(const IntegrationStep& next_step,
                                             const IntegrationStep& prev_step) {
    using std::abs;
    using std::max;
    if (prev_step.size() != next_step.size()) {
      throw std::runtime_error("Provided step dimensions and previous"
                               " step dimensions do not match.");
    }
    // Maximum time misalignment between previous step and next step that
    // can still be disregarded as a discontinuity in time. The tolerance
    // scales with the magnitude of the previous end time (relative epsilon).
    const T& prev_end_time = prev_step.end_time();
    const T& next_start_time = next_step.start_time();
    const T allowed_time_misalignment =
        max(abs(prev_end_time), T{1.}) * std::numeric_limits<T>::epsilon();
    const T time_misalignment = abs(prev_end_time - next_start_time);
    if (time_misalignment > allowed_time_misalignment) {
      throw std::runtime_error("Provided step start time and"
                               " previous step end time differ.");
    }
    // We can't sanity check the state values when using symbolic expressions.
    if constexpr (scalar_predicate<T>::is_bool) {
      const MatrixX<T>& prev_end_state = prev_step.get_states().back();
      const MatrixX<T>& next_start_state = next_step.get_states().front();
      if (!prev_end_state.isApprox(next_start_state)) {
        throw std::runtime_error(
            "Provided step start state and previous step end state differ. "
            "Cannot ensure C0 continuity.");
      }
      const MatrixX<T>& prev_end_state_derivative =
          prev_step.get_state_derivatives().back();
      const MatrixX<T>& next_start_state_derivative =
          next_step.get_state_derivatives().front();
      if (!prev_end_state_derivative.isApprox(next_start_state_derivative)) {
        throw std::runtime_error(
            "Provided step start state derivative and previous step end state "
            "derivative differ. Cannot ensure C1 continuity.");
      }
    }
  }

  // TODO(hidmic): Remove redundant time-keeping member fields when
  // PiecewisePolynomial supports return by-reference of its time extents.
  // It currently returns them by-value, double type only, and thus the
  // need for this storage in order to meet DenseOutput::start_time()
  // and DenseOutput::end_time() API.

  // The smallest time at which the output is defined.
  T start_time_{};
  // The largest time at which the output is defined.
  T end_time_{};

  // The last integration step consolidated into `continuous_trajectory_`,
  // useful to validate the next integration steps.
  // @see EnsureOutputConsistencyOrThrow
  IntegrationStep last_consolidated_step_{};

  // The integration steps taken but not consolidated yet (via Consolidate()).
  std::vector<IntegrationStep> raw_steps_{};

  // TODO(hidmic): When PiecewisePolynomial supports scalar types other than
  // doubles, pass in the template parameter T to it too and remove all scalar
  // type conversions. UPDATE(russt): New plan is to deprecate this class, as
  // PiecewisePolynomial can serve the intended role by itself.

  // The underlying PiecewisePolynomial continuous trajectory.
  trajectories::PiecewisePolynomial<double> continuous_trajectory_{};
};
} // namespace systems
} // namespace drake
DRAKE_DECLARE_CLASS_TEMPLATE_INSTANTIATIONS_ON_DEFAULT_SCALARS(
class drake::systems::HermitianDenseOutput)
| 0 |
/home/johnshepherd/drake/systems | /home/johnshepherd/drake/systems/analysis/simulator_print_stats.cc | #include "drake/systems/analysis/simulator_print_stats.h"
#include <regex>
#include <string>
#include <fmt/core.h>
#include "drake/common/default_scalars.h"
#include "drake/common/nice_type_name.h"
#include "drake/systems/analysis/implicit_integrator.h"
#include "drake/systems/analysis/integrator_base.h"
#include "drake/systems/analysis/simulator.h"
namespace drake {
namespace systems {
// Prints (to stdout, via fmt::print) the simulator's discrete-update
// statistics followed by the integrator's statistics, with extra detail when
// the integrator is error-controlled and/or implicit. See the declaration in
// simulator_print_stats.h for the public documentation.
template <typename T>
void PrintSimulatorStatistics(const Simulator<T>& simulator) {
  const systems::IntegratorBase<T>& integrator = simulator.get_integrator();
  // Label the integrator by its (namespace-stripped) class name.
  std::string integrator_scheme_name =
      NiceTypeName::RemoveNamespaces(NiceTypeName::Get(integrator));

  // Remove "<double>" from the scheme name if it's in it.
  // The other scalar type T=AutoDiffXd is more interesting and we keep it
  // in the name.
  if constexpr (std::is_same_v<T, double>) {
    integrator_scheme_name =
        std::regex_replace(integrator_scheme_name, std::regex("<double>"), "");
  }

  // --- Simulator-level statistics (discrete/unrestricted updates). ---
  fmt::print("General stats regarding discrete updates:\n");
  fmt::print("Number of time steps taken (simulator stats) = {:d}\n",
             simulator.get_num_steps_taken());
  fmt::print("Simulator publishes every time step: {}\n",
             simulator.get_publish_every_time_step());
  fmt::print("Number of publishes = {:d}\n", simulator.get_num_publishes());
  fmt::print("Number of discrete updates = {:d}\n",
             simulator.get_num_discrete_updates());
  fmt::print("Number of \"unrestricted\" updates = {:d}\n",
             simulator.get_num_unrestricted_updates());
  if (integrator.get_num_steps_taken() == 0) {
    fmt::print("\nNote: the following integrator took zero steps. The "
               "simulator exclusively used the discrete solver.\n");
  }

  // --- Integrator-level statistics (all integrators). ---
  fmt::print(
      "\nStats for integrator {} with {}:\n", integrator_scheme_name,
      integrator.get_fixed_step_mode() ? "fixed steps" : "error control");
  fmt::print("Number of time steps taken (integrator stats) = {:d}\n",
             integrator.get_num_steps_taken());
  if (!integrator.get_fixed_step_mode()) {
    // Print statistics available only to error-controlled integrators.
    fmt::print(
        "Initial time step taken = {:10.6g} s\n",
        ExtractDoubleOrThrow(integrator.get_actual_initial_step_size_taken()));
    fmt::print("Largest time step taken = {:10.6g} s\n",
               ExtractDoubleOrThrow(integrator.get_largest_step_size_taken()));
    fmt::print("Smallest adapted step size = {:10.6g} s\n",
               ExtractDoubleOrThrow(
                   integrator.get_smallest_adapted_step_size_taken()));
    fmt::print("Number of steps shrunk due to error control = {:d}\n",
               integrator.get_num_step_shrinkages_from_error_control());
  }
  fmt::print("Number of derivative evaluations = {:d}\n",
             integrator.get_num_derivative_evaluations());

  // These two statistics can only be nonzero with implicit integrators, but
  // because they're available in IntegratorBase, we print them for all
  // integrators as a sanity check.
  fmt::print("Number of steps shrunk due to convergence-based failure = {:d}\n",
             integrator.get_num_step_shrinkages_from_substep_failures());
  fmt::print(
      "Number of convergence-based step failures (should match) = {:d}\n",
      integrator.get_num_substep_failures());

  // Check if the integrator is implicit using dynamic casting. If it's
  // implicit, we can print out a few more helpful statistics.
  const systems::ImplicitIntegrator<T>* implicit_integrator =
      dynamic_cast<const systems::ImplicitIntegrator<T>*>(
          &(simulator.get_integrator()));
  const bool integrator_is_implicit = (implicit_integrator != nullptr);

  if (integrator_is_implicit) {
    // In this section, we print statistics available only to implicit
    // integrators.
    if (integrator.supports_error_estimation()) {
      // If the integrator supports error control, we include error estimator
      // details. For each statistic, the first value, for just the
      // "integrator", is computed by subtracting the error estimator's value
      // from the total. The other two values are grabbed directly from the
      // integrator's statistics. Note: Even if the integrator was run in
      // fixed-step mode, they still run the error estimator (but don't use
      // the results), which is why we still output the error estimator
      // statistics.
      if (integrator.get_fixed_step_mode()) {
        // Warn the user that integrators that support error estimation will
        // run the error estimator even in fixed-step mode.
        fmt::print(
            "Note: This implicit integrator was run in fixed-step mode, but "
            "it supports error estimation, so the error estimator is "
            "expected to have nonzero values in the following statistics.\n");
      }
      fmt::print(
          "Implicit Integrator Statistics (integrator, error estimator, "
          "total):\n");
      fmt::print(
          "Number of Derivative Evaluations = {:d}, {:d}, {:d}\n",
          implicit_integrator->get_num_derivative_evaluations() -
              implicit_integrator
                  ->get_num_error_estimator_derivative_evaluations(),
          implicit_integrator
              ->get_num_error_estimator_derivative_evaluations(),
          implicit_integrator->get_num_derivative_evaluations());
      fmt::print(
          "Number of Jacobian Computations = {:d}, {:d}, {:d}\n",
          implicit_integrator->get_num_jacobian_evaluations() -
              implicit_integrator
                  ->get_num_error_estimator_jacobian_evaluations(),
          implicit_integrator->get_num_error_estimator_jacobian_evaluations(),
          implicit_integrator->get_num_jacobian_evaluations());
      fmt::print(
          "Number of Derivative Evaluations for Jacobians = {:d}, {:d}, {:d}"
          "\n",
          implicit_integrator->get_num_derivative_evaluations_for_jacobian() -
              implicit_integrator
                  ->get_num_error_estimator_derivative_evaluations_for_jacobian(),
          implicit_integrator
              ->get_num_error_estimator_derivative_evaluations_for_jacobian(),
          implicit_integrator->get_num_derivative_evaluations_for_jacobian());
      fmt::print(
          "Number of Iteration Matrix Factorizations = {:d}, {:d}, {:d}\n",
          implicit_integrator->get_num_iteration_matrix_factorizations() -
              implicit_integrator
                  ->get_num_error_estimator_iteration_matrix_factorizations(),
          implicit_integrator
              ->get_num_error_estimator_iteration_matrix_factorizations(),
          implicit_integrator->get_num_iteration_matrix_factorizations());
      fmt::print("Number of Newton-Raphson Iterations = {:d}, {:d}, {:d}\n",
                 implicit_integrator->get_num_newton_raphson_iterations() -
                     implicit_integrator
                         ->get_num_error_estimator_newton_raphson_iterations(),
                 implicit_integrator
                     ->get_num_error_estimator_newton_raphson_iterations(),
                 implicit_integrator->get_num_newton_raphson_iterations());
    } else {
      // If the integrator used fixed-steps, we just print the total for each
      // statistic.
      fmt::print("Implicit Integrator Statistics:\n");
      fmt::print("Number of Derivative Evaluations = {:d}\n",
                 implicit_integrator->get_num_derivative_evaluations());
      fmt::print("Number of Jacobian Computations = {:d}\n",
                 implicit_integrator->get_num_jacobian_evaluations());
      fmt::print(
          "Number of Derivative Evaluations for Jacobians = {:d}\n",
          implicit_integrator->get_num_derivative_evaluations_for_jacobian());
      fmt::print(
          "Number of Iteration Matrix Factorizations = {:d}\n",
          implicit_integrator->get_num_iteration_matrix_factorizations());
      fmt::print("Number of Newton-Raphson Iterations = {:d}\n",
                 implicit_integrator->get_num_newton_raphson_iterations());
    }
  }
}
DRAKE_DEFINE_FUNCTION_TEMPLATE_INSTANTIATIONS_ON_DEFAULT_NONSYMBOLIC_SCALARS(
(&PrintSimulatorStatistics<T>))
} // namespace systems
} // namespace drake
| 0 |
/home/johnshepherd/drake/systems | /home/johnshepherd/drake/systems/analysis/runge_kutta3_integrator.cc | #include "drake/systems/analysis/runge_kutta3_integrator.h"
namespace drake {
namespace systems {
/*
* RK3-specific initialization function.
* @throws std::exception if *neither* the initial step size target nor the
* maximum step size have been set before calling.
*/
template <class T>
void RungeKutta3Integrator<T>::DoInitialize() {
  using std::isnan;
  // Tuning constants specific to this integrator.
  const double kDefaultAccuracy = 1e-3;  // Good for this particular integrator.
  const double kLoosestAccuracy = 1e-1;  // Integrator specific.
  const double kMaxStepFraction = 0.1;   // Fraction of max step size for
                                         // less aggressive first step.

  // If no initial step size target was provided, derive one from the maximum
  // step size, which must then have been set.
  if (isnan(this->get_initial_step_size_target())) {
    if (isnan(this->get_maximum_step_size())) {
      throw std::logic_error("Neither initial step size target nor maximum "
                             "step size has been set!");
    }
    this->request_initial_step_size_target(kMaxStepFraction *
                                           this->get_maximum_step_size());
  }

  // Choose the accuracy to operate at: fall back to the default when the user
  // specified none, and clamp requests looser than this integrator supports.
  double accuracy = this->get_target_accuracy();
  if (isnan(accuracy)) {
    accuracy = kDefaultAccuracy;
  } else if (accuracy > kLoosestAccuracy) {
    accuracy = kLoosestAccuracy;
  }
  this->set_accuracy_in_use(accuracy);
}
// Advances the continuous state by one Runge-Kutta 3 step of size h,
// overwriting the context's time and continuous state with the O(h³)
// result at t₁ = t₀ + h, and filling in the integrator's error estimate.
template <class T>
bool RungeKutta3Integrator<T>::DoStep(const T& h) {
  using std::abs;
  Context<T>& context = *this->get_mutable_context();
  const T t0 = context.get_time();
  const T t1 = t0 + h;

  // CAUTION: This is performance-sensitive inner loop code that uses dangerous
  // long-lived references into state and cache to avoid unnecessary copying and
  // cache invalidation. Be careful not to insert calls to methods that could
  // invalidate any of these references before they are used.

  // TODO(sherm1) Consider moving this notation description to IntegratorBase
  //              when it is more widely adopted.
  // Notation: we're using numeric subscripts for times t₀ and t₁, and
  // lower-case letter superscripts like t⁽ᵃ⁾ and t⁽ᵇ⁾ to indicate values
  // for intermediate stages of which there are two here, a and b.
  // State x₀ = {xc₀, xd₀, xa₀}. We modify only t and xc here, but
  // derivative calculations depend on everything in the context, including t,
  // x and inputs u (which may depend on t and x).
  // Define x⁽ᵃ⁾ ≜ {xc⁽ᵃ⁾, xd₀, xa₀} and u⁽ᵃ⁾ ≜ u(t⁽ᵃ⁾, x⁽ᵃ⁾).

  // Evaluate derivative xcdot₀ ← xcdot(t₀, x(t₀), u(t₀)). Copy the result
  // into a temporary since we'll be calculating more derivatives below.
  derivs0_->get_mutable_vector().SetFrom(
      this->EvalTimeDerivatives(context).get_vector());
  const VectorBase<T>& xcdot0 = derivs0_->get_vector();

  // Cache: xcdot0 references a *copy* of the derivative result so is immune
  // to subsequent evaluations.

  // Compute the first intermediate state and derivative
  // (at t⁽ᵃ⁾=t₀+h/2, x⁽ᵃ⁾, u⁽ᵃ⁾).

  // This call marks t- and xc-dependent cache entries out of date, including
  // the derivative cache entry. Note that xc is a live reference into the
  // context -- subsequent changes through that reference are unobservable so
  // will require manual out-of-date notifications.
  VectorBase<T>& xc = context.SetTimeAndGetMutableContinuousStateVector(
      t0 + h / 2);                          // t⁽ᵃ⁾ ← t₀ + h/2
  xc.CopyToPreSizedVector(&save_xc0_);      // Save xc₀ while we can.
  xc.PlusEqScaled(h / 2, xcdot0);           // xc⁽ᵃ⁾ ← xc₀ + h/2 xcdot₀

  derivs1_->get_mutable_vector().SetFrom(
      this->EvalTimeDerivatives(context).get_vector());
  const VectorBase<T>& xcdot_a = derivs1_->get_vector();  // xcdot⁽ᵃ⁾

  // Cache: xcdot_a references a *copy* of the derivative result so is immune
  // to subsequent evaluations.

  // Compute the second intermediate state and derivative
  // (at t⁽ᵇ⁾=t₁, x⁽ᵇ⁾, u⁽ᵇ⁾).

  // This call marks t- and xc-dependent cache entries out of date, including
  // the derivative cache entry. (We already have the xc reference but must
  // issue the out-of-date notification here since we're about to change it.)
  context.SetTimeAndNoteContinuousStateChange(t1);

  // xcⱼ ← xc₀ - h xcdot₀ + 2 h xcdot⁽ᵃ⁾
  xc.SetFromVector(save_xc0_);  // Restore xc ← xc₀.
  xc.PlusEqScaled({{-h, xcdot0}, {2 * h, xcdot_a}});

  const VectorBase<T>& xcdot_b =  // xcdot⁽ᵇ⁾
      this->EvalTimeDerivatives(context).get_vector();

  // Cache: xcdot_b references the live derivative cache value, currently
  // up to date but about to be marked out of date. We do not want to make
  // an unnecessary copy of this data.

  // Cache: we're about to write through the xc reference again, so need to
  // mark xc-dependent cache entries out of date, including xcdot_b; time
  // doesn't change here.
  context.NoteContinuousStateChange();

  // Calculate the final O(h³) state at t₁.
  // xc₁ ← xc₀ + h/6 xcdot₀ + 2/3 h xcdot⁽ᵃ⁾ + h/6 xcdot⁽ᵇ⁾
  xc.SetFromVector(save_xc0_);  // Restore xc ← xc₀.
  const T h6 = h / 6.0;

  // Cache: xcdot_b still references the derivative cache value, which is
  // unchanged, although it is marked out of date. xcdot0 and xcdot_a are
  // unaffected.
  xc.PlusEqScaled({{h6, xcdot0},
                   {4 * h6, xcdot_a},
                   {h6, xcdot_b}});

  // If the size of the system has changed, the error estimate will no longer
  // be sized correctly. Verify that the error estimate is the correct size.
  DRAKE_DEMAND(this->get_error_estimate()->size() == xc.size());

  // Calculate the error estimate using an Eigen vector then copy it to the
  // continuous state vector, where the various state components can be
  // analyzed. The comparison state xc₀ + h xcdot⁽ᵃ⁾ is the lower-order
  // (explicit midpoint) solution at t₁.
  // ε = | xc₁ - (xc₀ + h xcdot⁽ᵃ⁾) | = | xc₀ + h xcdot⁽ᵃ⁾ - xc₁ |

  // TODO(sherm1) Set err_est_vec_ to xc0 at the start and use it above to
  //              avoid the need for save_xc0_ and this copy altogether.
  err_est_vec_ = save_xc0_;                         // ε ← xc₀
  xcdot_a.ScaleAndAddToVector(h, &err_est_vec_);    // ε += h xcdot⁽ᵃ⁾
  xc.ScaleAndAddToVector(-1.0, &err_est_vec_);      // ε -= xc₁
  err_est_vec_ = err_est_vec_.cwiseAbs();
  this->get_mutable_error_estimate()->SetFromVector(err_est_vec_);

  // RK3 always succeeds in taking its desired step.
  return true;
}
} // namespace systems
} // namespace drake
DRAKE_DEFINE_CLASS_TEMPLATE_INSTANTIATIONS_ON_DEFAULT_NONSYMBOLIC_SCALARS(
class drake::systems::RungeKutta3Integrator)
| 0 |
/home/johnshepherd/drake/systems | /home/johnshepherd/drake/systems/analysis/simulator_config_functions.h | #pragma once
#include <string>
#include <vector>
#include "drake/common/default_scalars.h"
#include "drake/systems/analysis/integrator_base.h"
#include "drake/systems/analysis/simulator.h"
#include "drake/systems/analysis/simulator_config.h"
namespace drake {
namespace systems {
/** @addtogroup simulation
@{
@defgroup simulator_configuration Simulator configuration
Configuration helpers to control Simulator and IntegratorBase settings.
@}
*/
/** Resets the integrator used to advanced the continuous time dynamics of the
system associated with `simulator` according to the given arguments.
@param[in,out] simulator On input, a valid pointer to a Simulator. On output
the integrator for `simulator` is reset according to the given arguments.
@param[in] scheme Integration scheme to be used, e.g., "runge_kutta2". See
GetIntegrationSchemes() for the list of valid options.
@param[in] max_step_size The IntegratorBase::set_maximum_step_size() value.
@returns A reference to the newly created integrator owned by `simulator`.
@tparam_nonsymbolic_scalar
@ingroup simulator_configuration */
template <typename T>
IntegratorBase<T>& ResetIntegratorFromFlags(
Simulator<T>* simulator,
const std::string& scheme,
const T& max_step_size);
/** Returns the allowed string values for the `scheme` parameter in
ResetIntegratorFromFlags() and SimulatorConfig::integration_scheme.
@ingroup simulator_configuration */
const std::vector<std::string>& GetIntegrationSchemes();
/** Modifies the `simulator` based on the given `config`. (Always replaces the
Integrator with a new one; be careful not to keep old references around.)
@param[in] config Configuration to be used. Contains values for both the
integrator and the simulator.
@param[in,out] simulator On input, a valid pointer to a Simulator. On output
the integrator for `simulator` is reset according to the given `config`.
@tparam_nonsymbolic_scalar
@ingroup simulator_configuration
@pydrake_mkdoc_identifier{config_sim} */
template <typename T>
void ApplySimulatorConfig(const SimulatorConfig& config,
drake::systems::Simulator<T>* simulator);
/** Reports the simulator's current configuration, including the configuration
of the integrator.
@param[in] simulator The Simulator to extract the configuration from.
@tparam_nonsymbolic_scalar
@note For non-double T (T=AutoDiffXd), doing ExtractSimulatorConfig will discard
the integrator's scalar type's extra information such as gradients.
@ingroup simulator_configuration */
template <typename T>
SimulatorConfig ExtractSimulatorConfig(
const drake::systems::Simulator<T>& simulator);
} // namespace systems
} // namespace drake
| 0 |
/home/johnshepherd/drake/systems | /home/johnshepherd/drake/systems/analysis/initial_value_problem.cc | #include "drake/systems/analysis/initial_value_problem.h"
#include <stdexcept>
#include "drake/systems/analysis/hermitian_dense_output.h"
#include "drake/systems/analysis/runge_kutta3_integrator.h"
#include "drake/systems/framework/continuous_state.h"
#include "drake/systems/framework/leaf_system.h"
namespace drake {
namespace systems {
namespace {
// A LeafSystem subclass used to describe parameterized ODE systems
// i.e. d𝐱/dt = f(t, 𝐱; 𝐤) where f : t ⨯ 𝐱 → ℝⁿ, t ∈ ℝ , 𝐱 ∈ ℝⁿ, 𝐤 ∈ ℝᵐ.
// The vector variable 𝐱 corresponds to the system state that is evolved
// through time t by the function f, which is in turn parameterized by a
// vector 𝐤.
//
// @tparam T The ℝ domain scalar type, which must be a valid Eigen scalar.
template <typename T>
class OdeSystem : public LeafSystem<T> {
 public:
  DRAKE_NO_COPY_NO_MOVE_NO_ASSIGN(OdeSystem);
  // Alias for the ODE right-hand side signature f(t, 𝐱; 𝐤) shared with the
  // enclosing InitialValueProblem.
  typedef typename InitialValueProblem<T>::OdeFunction SystemFunction;
  // Constructs a system that will use the given @p system_function,
  // parameterized as described by the @p param_model, to compute the
  // derivatives and advance the @p state_model.
  //
  // @remarks Here, the 'model' term has been borrowed from LeafSystem
  // terminology, where these vectors are used both to provide initial
  // values and to convey information about the dimensionality of the
  // variables involved.
  //
  // @param system_function The system function f(t, 𝐱; 𝐤).
  // @param state_model The state model vector 𝐱₀, with initial values.
  // @param param_model The parameter model vector 𝐤₀, with default values.
  OdeSystem(const SystemFunction& system_function,
            const VectorX<T>& state_model, const VectorX<T>& param_model);
 protected:
  // LeafSystem override that evaluates d𝐱/dt = f(t, 𝐱; 𝐤) at the context's
  // time, state, and parameters.
  void DoCalcTimeDerivatives(const Context<T>& context,
                             ContinuousState<T>* derivatives) const override;
 private:
  // General ODE system d𝐱/dt = f(t, 𝐱; 𝐤) function.
  const SystemFunction system_function_;
};
template <typename T>
OdeSystem<T>::OdeSystem(
    const typename OdeSystem<T>::SystemFunction& system_function,
    const VectorX<T>& state_model, const VectorX<T>& param_model)
    : system_function_(system_function) {
  // Models system state after the given state model.
  this->DeclareContinuousState(BasicVector<T>(state_model));
  // Models system parameters after the given parameter model.
  // NOTE: this must remain the only declared numeric parameter, because
  // DoCalcTimeDerivatives() retrieves it by index 0.
  this->DeclareNumericParameter(BasicVector<T>(param_model));
}
template <typename T>
void OdeSystem<T>::DoCalcTimeDerivatives(
    const Context<T>& context, ContinuousState<T>* derivatives) const {
  // Evaluates d𝐱/dt = f(t, 𝐱; 𝐤) using the time, state 𝐱, and parameters 𝐤
  // held in `context`, writing the result into `derivatives`.
  //
  // Retrieves the state vector. This cast is safe because the
  // ContinuousState<T> of a LeafSystem<T> is flat i.e. it is just
  // a BasicVector<T>, and the implementation deals with LeafSystem<T>
  // instances only by design.
  const BasicVector<T>& state_vector = dynamic_cast<const BasicVector<T>&>(
      context.get_continuous_state_vector());
  // Retrieves the parameter vector (declared at index 0 in the constructor).
  const BasicVector<T>& parameter_vector = context.get_numeric_parameter(0);
  // Retrieves the derivatives vector. This cast is safe because the
  // ContinuousState<T> of a LeafSystem<T> is flat i.e. it is just
  // a BasicVector<T>, and the implementation deals with LeafSystem<T>
  // instances only by design.
  BasicVector<T>& derivatives_vector =
      dynamic_cast<BasicVector<T>&>(derivatives->get_mutable_vector());
  // Computes the derivatives vector using the given system function
  // for the given time and state and with the given parameterization.
  derivatives_vector.set_value(system_function_(context.get_time(),
                                                state_vector.get_value(),
                                                parameter_vector.get_value()));
}
} // namespace
// Default accuracy requested of the embedded error-controlled integrator.
template <typename T>
const double InitialValueProblem<T>::kDefaultAccuracy = 1e-4;
// Initial step size requested of the embedded integrator.
template <typename T>
const T InitialValueProblem<T>::kInitialStepSize = static_cast<T>(1e-4);
// Largest step size the embedded integrator is allowed to take.
template <typename T>
const T InitialValueProblem<T>::kMaxStepSize = static_cast<T>(1e-1);
template <typename T>
InitialValueProblem<T>::InitialValueProblem(
    const OdeFunction& ode_function, const Eigen::Ref<const VectorX<T>>& x0,
    const Eigen::Ref<const VectorX<T>>& k) {
  // Instantiates the system using the given initial conditions and parameters.
  system_ = std::make_unique<OdeSystem<T>>(ode_function, x0, k);
  // Allocates a new default integration context. NOTE: the integrator below
  // keeps a mutable pointer into this context, so `context_` must be created
  // first and must outlive `integrator_`.
  context_ = system_->CreateDefaultContext();
  // Instantiates an explicit RK3 integrator by default.
  integrator_ =
      std::make_unique<RungeKutta3Integrator<T>>(*system_, context_.get());
  // Sets step size and accuracy defaults.
  integrator_->request_initial_step_size_target(
      InitialValueProblem<T>::kInitialStepSize);
  integrator_->set_maximum_step_size(InitialValueProblem<T>::kMaxStepSize);
  integrator_->set_target_accuracy(InitialValueProblem<T>::kDefaultAccuracy);
}
template <typename T>
VectorX<T> InitialValueProblem<T>::Solve(const T& t0, const T& tf) const {
  // Integrates the ODE from t0 to tf and returns x(tf). Requires tf >= t0.
  DRAKE_THROW_UNLESS(tf >= t0);
  context_->SetTime(t0);
  // Restores default state/parameters and preserves integrator settings.
  ResetState();
  // Initializes integrator if necessary.
  if (!integrator_->is_initialized()) {
    integrator_->Initialize();
  }
  // Integrates up to the requested time.
  integrator_->IntegrateWithMultipleStepsToTime(tf);
  // Retrieves the state vector. This cast is safe because the
  // ContinuousState<T> of a LeafSystem<T> is flat i.e. it is just
  // a BasicVector<T>, and the implementation deals with LeafSystem<T>
  // instances only by design.
  const BasicVector<T>& state_vector = dynamic_cast<const BasicVector<T>&>(
      context_->get_continuous_state_vector());
  return state_vector.get_value();
}
template <typename T>
void InitialValueProblem<T>::ResetState() const {
  // Restores the context's state and parameters to the system defaults.
  system_->SetDefaultContext(context_.get());
  // Stashes the integrator's step-size and accuracy settings so they can
  // survive the reset below (whether or not this particular integrator type
  // actually makes use of all of them).
  const T saved_max_step = integrator_->get_maximum_step_size();
  const T saved_initial_step = integrator_->get_initial_step_size_target();
  const double saved_accuracy = integrator_->get_target_accuracy();
  // Wipes the integrator's internal state.
  integrator_->Reset();
  // Reapplies the stashed settings.
  integrator_->set_maximum_step_size(saved_max_step);
  if (integrator_->supports_error_estimation()) {
    // Initial step targets and accuracy only apply to error-controlled
    // integrators, so reapply them conditionally.
    integrator_->request_initial_step_size_target(saved_initial_step);
    integrator_->set_target_accuracy(saved_accuracy);
  }
}
template <typename T>
std::unique_ptr<DenseOutput<T>> InitialValueProblem<T>::DenseSolve(
    const T& t0, const T& tf) const {
  // Integrates the ODE over [t0, tf] while building a dense (continuous-time)
  // approximation of the solution, which is returned. Requires tf >= t0.
  DRAKE_THROW_UNLESS(tf >= t0);
  context_->SetTime(t0);
  ResetState();
  // Unconditionally re-initialize integrator.
  integrator_->Initialize();
  // Starts dense integration to build a dense output.
  integrator_->StartDenseIntegration();
  // Steps the integrator through the entire interval.
  integrator_->IntegrateWithMultipleStepsToTime(tf);
  // Stops dense integration to prevent future updates to
  // the dense output just built and yields it to the caller.
  const std::unique_ptr<trajectories::PiecewisePolynomial<T>> traj =
      integrator_->StopDenseIntegration();
  return std::make_unique<HermitianDenseOutput<T>>(*traj);
}
} // namespace systems
} // namespace drake
DRAKE_DEFINE_CLASS_TEMPLATE_INSTANTIATIONS_ON_DEFAULT_NONSYMBOLIC_SCALARS(
class drake::systems::InitialValueProblem)
| 0 |
/home/johnshepherd/drake/systems | /home/johnshepherd/drake/systems/analysis/antiderivative_function.cc | #include "drake/systems/analysis/antiderivative_function.h"
namespace drake {
namespace systems {
template <typename T>
AntiderivativeFunction<T>::AntiderivativeFunction(
    const IntegrableFunction& integrable_function,
    const Eigen::Ref<const VectorX<T>>& k) {
  // Recasts the definite integral of f(t; 𝐤) as a scalar ODE
  // dx/dt = f(t; 𝐤), x(t₀) = 0, so that solving the ODE yields the
  // antiderivative; the accumulated state x never feeds back into the
  // derivative.
  using ScalarOde = typename ScalarInitialValueProblem<T>::ScalarOdeFunction;
  const ScalarOde ode = [integrable_function](const T& t, const T& x,
                                              const VectorX<T>& params) -> T {
    unused(x);  // The integrand does not depend on the accumulated value.
    return integrable_function(t, params);
  };
  // Delegates the actual integration work to a scalar initial value problem
  // starting from x(t₀) = 0, parameterized by 𝐤.
  scalar_ivp_ = std::make_unique<ScalarInitialValueProblem<T>>(ode, 0.0, k);
}
template <typename T>
T AntiderivativeFunction<T>::Evaluate(const T& v, const T& u) const {
  // Computes ∫ᵥᵘ f(t; 𝐤) dt by solving the equivalent scalar IVP from
  // t = v to t = u.
  return this->scalar_ivp_->Solve(v, u);
}
template <typename T>
std::unique_ptr<ScalarDenseOutput<T>>
AntiderivativeFunction<T>::MakeDenseEvalFunction(const T& v, const T& w) const {
  // Densely solves the underlying scalar IVP over [v, w]; the returned dense
  // output can then evaluate the antiderivative anywhere in that interval.
  return this->scalar_ivp_->DenseSolve(v, w);
}
} // namespace systems
} // namespace drake
DRAKE_DEFINE_CLASS_TEMPLATE_INSTANTIATIONS_ON_DEFAULT_NONSYMBOLIC_SCALARS(
class drake::systems::AntiderivativeFunction)
| 0 |
/home/johnshepherd/drake/systems | /home/johnshepherd/drake/systems/analysis/velocity_implicit_euler_integrator.h | #pragma once
#include <memory>
#include <stdexcept>
#include "drake/common/autodiff.h"
#include "drake/common/default_scalars.h"
#include "drake/common/drake_copyable.h"
#include "drake/systems/analysis/implicit_integrator.h"
#include "drake/systems/framework/basic_vector.h"
namespace drake {
namespace systems {
/**
* A first-order, fully implicit integrator optimized for second-order systems,
* with a second-order error estimate.
*
* The velocity-implicit Euler integrator is a variant of the first-order
* implicit Euler that takes advantage of the simple mapping q̇ = N(q) v
* of second order systems to formulate a smaller problem in velocities (and
* miscellaneous states if any) only. For systems with second-order dynamics,
* %VelocityImplicitEulerIntegrator formulates a problem that is half as large
* as that formulated by Drake's ImplicitEulerIntegrator, resulting in improved
* run-time performance. Upon convergence of the resulting system of equations,
* this method provides the same discretization as ImplicitEulerIntegrator, but
* at a fraction of the computational cost.
*
* This integrator requires a system of ordinary differential equations (ODEs)
* in state `x = (q,v,z)` to be expressible in the following form:
*
* q̇ = N(q) v; (1)
* ẏ = f_y(t,q,y), (2)
* where `q̇` and `v` are linearly related via the kinematic mapping `N(q)`,
* `y = (v,z)`, and `f_y` is a function that can depend on the time and state.
*
* Implicit Euler uses the following update rule at time step n:
*
* qⁿ⁺¹ = qⁿ + h N(qⁿ⁺¹) vⁿ⁺¹; (3)
* yⁿ⁺¹ = yⁿ + h f_y(tⁿ⁺¹,qⁿ⁺¹,yⁿ⁺¹). (4)
*
* To solve the nonlinear system for `(qⁿ⁺¹,yⁿ⁺¹)`, the velocity-implicit Euler
* integrator iterates with a modified Newton's method: At iteration `k`, it
* finds a `(qₖ₊₁,yₖ₊₁)` that attempts to satisfy
*
 *     qₖ₊₁ = qⁿ + h N(qₖ) vₖ₊₁;    (5)
 *     yₖ₊₁ = yⁿ + h f_y(tⁿ⁺¹,qₖ₊₁,yₖ₊₁).    (6)
*
* In this notation, the `n`'s index time steps, while the `k`'s index the
* specific Newton-Raphson iterations within each time step.
*
* Notice that we've intentionally lagged N(qₖ) one iteration behind in Eq (5).
* This allows it to substitute (5) into (6) to obtain a non-linear system in
* `y` only. Contrast this strategy with the one implemented by
* ImplicitEulerIntegrator, which solves a larger non-linear system in the full
* state x.
*
* To find a `(qₖ₊₁,yₖ₊₁)` that approximately satisfies (5-6), we linearize
* the system (5-6) to compute a Newton step. Define
*
* ℓ(y) = f_y(tⁿ⁺¹,qⁿ + h N(qₖ) v,y), (7)
* Jₗ(y) = ∂ℓ(y) / ∂y. (8)
*
* To advance the Newton step, the velocity-implicit Euler integrator solves
* the following linear equation for `Δy`:
*
* (I - h Jₗ) Δy = - R(yₖ), (9)
* where `R(y) = y - yⁿ - h ℓ(y)` and `Δy = yₖ₊₁ - yₖ`. The `Δy` solution
* directly gives us `yₖ₊₁`. It then substitutes the `vₖ₊₁` component of `yₖ₊₁`
* in (5) to get `qₖ₊₁`.
*
* This implementation uses a Newton method and relies upon the convergence
* to a solution for `y` in `R(y) = 0` where `R(y) = y - yⁿ - h ℓ(y)`
* as `h` becomes sufficiently small. General implementational details for
* the Newton method were gleaned from Section IV.8 in [Hairer, 1996].
*
* ### Error Estimation
*
* In this integrator, we simultaneously take a large step at the requested
* step size of h as well as two half-sized steps each with step size `h/2`.
* The result from two half-sized steps is propagated as the solution, while
* the difference between the two results is used as the error estimate for the
* propagated solution. This error estimate is accurate to the second order.
*
* To be precise, let `x̅ⁿ⁺¹` be the computed solution from a large step,
* `x̃ⁿ⁺¹` be the computed solution from two small steps, and `xⁿ⁺¹` be the true
* solution. Since the integrator propagates `x̃ⁿ⁺¹` as its solution, we denote
* the true error vector as `ε = x̃ⁿ⁺¹ - xⁿ⁺¹`. VelocityImplicitEulerIntegrator
* uses `ε* = x̅ⁿ⁺¹ - x̃ⁿ⁺¹`, the difference between the two solutions, as the
* second-order error estimate, because for a smooth system, `‖ε*‖ = O(h²)`,
* and `‖ε - ε*‖ = O(h³)`. See the notes in
* VelocityImplicitEulerIntegrator<T>::get_error_estimate_order() for a
* detailed derivation of the error estimate's truncation error.
*
* In this implementation, VelocityImplicitEulerIntegrator<T> attempts the
* large full-sized step before attempting the two small half-sized steps,
* because the large step is more likely to fail to converge, and if it is
* performed first, convergence failures are detected early, avoiding the
* unnecessary effort of computing potentially-successful small steps.
*
* - [Hairer, 1996] E. Hairer and G. Wanner. Solving Ordinary Differential
* Equations II (Stiff and Differential-Algebraic Problems).
* Springer, 1996, Section IV.8, p. 118–130.
*
* @note In the statistics reported by IntegratorBase, all statistics that deal
* with the number of steps or the step sizes will track the large full-sized
* steps. This is because the large full-sized `h` is the smallest irrevocable
* time-increment advanced by this integrator: if, for example, the second small
* half-sized step fails, this integrator revokes to the state before the first
* small step. This behavior is similar to other integrators with multi-stage
* evaluation: the step-counting statistics treat a "step" as the combination of
* all the stages.
* @note Furthermore, because the small half-sized steps are propagated as the
* solution, the large full-sized step is the error estimator, and the error
* estimation statistics track the effort during the large full-sized step. If
* the integrator is not in full-Newton mode (see
* ImplicitIntegrator<T>::set_use_full_newton()), most of the work incurred by
* constructing and factorizing matrices and by failing Newton-Raphson
* iterations will be counted toward the error estimation statistics, because
* the large step is performed first.
* @note This integrator uses the integrator accuracy setting, even when run
* in fixed-step mode, to limit the error in the underlying Newton-Raphson
* process. See IntegratorBase::set_target_accuracy() for more info.
*
* @see ImplicitIntegrator class documentation for information about implicit
* integration methods in general.
* @see ImplicitEulerIntegrator class documentation for information about
* the "implicit Euler" integration method.
*
* @tparam_nonsymbolic_scalar
* @ingroup integrators
*/
template <class T>
class VelocityImplicitEulerIntegrator final : public ImplicitIntegrator<T> {
public:
DRAKE_NO_COPY_NO_MOVE_NO_ASSIGN(VelocityImplicitEulerIntegrator)
~VelocityImplicitEulerIntegrator() override = default;
explicit VelocityImplicitEulerIntegrator(const System<T>& system,
Context<T>* context = nullptr)
: ImplicitIntegrator<T>(system, context) {}
/**
* Returns true, because this integrator supports error estimation.
*/
bool supports_error_estimation() const final { return true; }
/**
* Returns the asymptotic order of the difference between the large and small
* steps (from which the error estimate is computed), which is 2. That is, the
* error estimate, `ε* = x̅ⁿ⁺¹ - x̃ⁿ⁺¹` has the property that `‖ε*‖ = O(h²)`,
* and it deviates from the true error, `ε`, by `‖ε - ε*‖ = O(h³)`.
*
* ### Derivation of the asymptotic order
*
* To derive the second-order error estimate, let us first define the vector-
* valued function `e(tⁿ, h, xⁿ) = x̅ⁿ⁺¹ - xⁿ⁺¹`, the local truncation error
* for a single, full-sized velocity-implicit Euler integration step, with
* initial conditions `(tⁿ, xⁿ)`, and a step size of `h`. Furthermore, use
* `ẍ` to denote `df/dt`, and `∇f` and `∇ẍ` to denote the Jacobians `df/dx`
* and `dẍ/dx` of the ODE system `ẋ = f(t, x)`. Note that `ẍ` uses a total
* time derivative, i.e., `ẍ = ∂f/∂t + ∇f f`.
*
* Let us use `x*` to denote the true solution after a half-step, `x(tⁿ+½h)`,
* and `x̃*` to denote the velocity-implicit Euler solution after a single
* half-sized step. Furthermore, let us use `xⁿ*¹` to denote the true solution
* of the system at time `t = tⁿ+h` if the system were at `x̃*` when
* `t = tⁿ+½h`. See the following diagram for an illustration.
*
* Legend:
* ───── propagation along the true system
* :···· propagation using implicit Euler with a half step
* :---- propagation using implicit Euler with a full step
*
* Time tⁿ tⁿ+½h tⁿ+h
*
* State :----------------------- x̅ⁿ⁺¹ <─── used for error estimation
* :
* :
* :
* : :·········· x̃ⁿ⁺¹ <─── propagated result
* : :
* :········· x̃* ─────── xⁿ*¹
* :
* xⁿ ─────── x* ─────── xⁿ⁺¹ <─── true solution
*
* We will use superscripts to denote evaluating an expression with `x` at
* that subscript and `t` at the corresponding time, e.g. `ẍⁿ` denotes
* `ẍ(tⁿ, xⁿ)`, and `f*` denotes `f(tⁿ+½h, x*)`. We first present a shortened
* derivation, followed by the longer, detailed version.
*
* We know the local truncation error for the implicit Euler method is:
*
* e(tⁿ, h, xⁿ) = x̅ⁿ⁺¹ - xⁿ⁺¹ = ½ h²ẍⁿ + O(h³). (10)
*
* The local truncation error ε from taking two half steps is composed of
* these two terms:
*
* e₁ = xⁿ*¹ - xⁿ⁺¹ = (1/8) h²ẍⁿ + O(h³), (15)
* e₂ = x̃ⁿ⁺¹ - xⁿ*¹ = (1/8) h²ẍⁿ + O(h³). (20)
*
* Taking the sum,
*
* ε = x̃ⁿ⁺¹ - xⁿ⁺¹ = e₁ + e₂ = (1/4) h²ẍⁿ + O(h³). (21)
*
* These two estimations allow us to obtain an estimation of the local error
* from the difference between the available quantities x̅ⁿ⁺¹ and x̃ⁿ⁺¹:
*
* ε* = x̅ⁿ⁺¹ - x̃ⁿ⁺¹ = e(tⁿ, h, xⁿ) - ε,
* = (1/4) h²ẍⁿ + O(h³), (22)
*
* and therefore our error estimate is second order.
*
* Below we will show this derivation in detail along with the proof that
* `‖ε - ε*‖ = O(h³)`:
*
* Let us look at a single velocity-implicit Euler step. Upon Newton-Raphson
* convergence, the truncation error for velocity-implicit Euler, which is the
* same as the truncation error for implicit Euler (because both methods solve
* Eqs. (3-4)), is
*
* e(tⁿ, h, xⁿ) = ½ h²ẍⁿ⁺¹ + O(h³)
* = ½ h²ẍⁿ + O(h³). (10)
*
* To see why the two are equivalent, we can Taylor expand about `(tⁿ, xⁿ)`,
*
* ẍⁿ⁺¹ = ẍⁿ + h dẍ/dtⁿ + O(h²) = ẍⁿ + O(h).
* e(tⁿ, h, xⁿ) = ½ h²ẍⁿ⁺¹ + O(h³) = ½ h²(ẍⁿ + O(h)) + O(h³)
* = ½ h²ẍⁿ + O(h³).
*
* Moving on with our derivation, after one small half-sized implicit Euler
* step, the solution `x̃*` is
*
* x̃* = x* + e(tⁿ, ½h, xⁿ)
* = x* + (1/8) h²ẍⁿ + O(h³),
* x̃* - x* = (1/8) h²ẍⁿ + O(h³). (11)
*
* Taylor expanding about `t = tⁿ+½h` in this `x = x̃*` alternate reality,
*
* xⁿ*¹ = x̃* + ½h f(tⁿ+½h, x̃*) + O(h²). (12)
*
* Similarly, Taylor expansions about `t = tⁿ+½h` and the true solution
* `x = x*` also give us
*
* xⁿ⁺¹ = x* + ½h f* + O(h²), (13)
* f(tⁿ+½h, x̃*) = f* + (∇f*) (x̃* - x*) + O(‖x̃* - x*‖²)
* = f* + O(h²), (14)
* where in the last line we substituted Eq. (11).
*
* Eq. (12) minus Eq. (13) gives us,
*
* xⁿ*¹ - xⁿ⁺¹ = x̃* - x* + ½h(f(tⁿ+½h, x̃*) - f*) + O(h³),
* = x̃* - x* + O(h³),
* where we just substituted in Eq. (14). Finally, substituting in Eq. (11),
*
* e₁ = xⁿ*¹ - xⁿ⁺¹ = (1/8) h²ẍⁿ + O(h³). (15)
*
* After the second small step, the solution `x̃ⁿ⁺¹` is
*
* x̃ⁿ⁺¹ = xⁿ*¹ + e(tⁿ+½h, ½h, x̃*),
* = xⁿ*¹ + (1/8)h² ẍ(tⁿ+½h, x̃*) + O(h³). (16)
*
* Taking Taylor expansions about `(tⁿ, xⁿ)`,
*
* x* = xⁿ + ½h fⁿ + O(h²) = xⁿ + O(h). (17)
* x̃* - xⁿ = (x̃* - x*) + (x* - xⁿ) = O(h), (18)
* where we substituted in Eqs. (11) and (17), and
*
* ẍ(tⁿ+½h, x̃*) = ẍⁿ + ½h ∂ẍ/∂tⁿ + ∇ẍⁿ (x̃* - xⁿ) + O(h ‖x̃* - xⁿ‖)
* = ẍⁿ + O(h), (19)
* where we substituted in Eq. (18).
*
* Substituting Eqs. (19) and (15) into Eq. (16),
*
* x̃ⁿ⁺¹ = xⁿ*¹ + (1/8) h²ẍⁿ + O(h³) (20)
* = xⁿ⁺¹ + (1/4) h²ẍⁿ + O(h³),
* therefore
*
* ε = x̃ⁿ⁺¹ - xⁿ⁺¹ = (1/4) h² ẍⁿ + O(h³). (21)
*
* Subtracting Eq. (21) from Eq. (10),
*
* e(tⁿ, h, xⁿ) - ε = (½ - 1/4) h²ẍⁿ + O(h³);
* ⇒ ε* = x̅ⁿ⁺¹ - x̃ⁿ⁺¹ = (1/4) h²ẍⁿ + O(h³). (22)
*
* Eq. (22) shows that our error estimate is second-order. Since the first
* term on the RHS matches `ε` (Eq. (21)),
*
* ε* = ε + O(h³). (23)
*/
int get_error_estimate_order() const final { return 2; }
private:
  int64_t do_get_num_newton_raphson_iterations() const final {
    // Total NR iterations across both the large and half-sized steps.
    return num_nr_iterations_;
  }
// These methods return the effort done by the large step, which is the error
// estimator for the half-sized steps.
  int64_t do_get_num_error_estimator_derivative_evaluations() const final {
    // Subtracting the half-step tally from the total leaves the large-step
    // (error estimator) share.
    return this->get_num_derivative_evaluations() -
           num_half_vie_function_evaluations_;
  }
  int64_t do_get_num_error_estimator_derivative_evaluations_for_jacobian()
      const final {
    // Large-step share: total minus the half-step-specific tally.
    return this->get_num_derivative_evaluations_for_jacobian() -
           num_half_vie_jacobian_function_evaluations_;
  }
  int64_t do_get_num_error_estimator_newton_raphson_iterations() const final {
    // Large-step share: total minus the half-step-specific tally.
    return this->get_num_newton_raphson_iterations() -
           num_half_vie_nr_iterations_;
  }
  int64_t do_get_num_error_estimator_jacobian_evaluations() const final {
    // Large-step share: total minus the half-step-specific tally.
    return this->get_num_jacobian_evaluations() -
           num_half_vie_jacobian_reforms_;
  }
  int64_t do_get_num_error_estimator_iteration_matrix_factorizations()
      const final {
    // Large-step share: total minus the half-step-specific tally.
    return this->get_num_iteration_matrix_factorizations() -
           num_half_vie_iter_factorizations_;
  }
  void DoResetCachedJacobianRelatedMatrices() final {
    // Empties the cached Jacobian Jₗ(y) and iteration matrix so the next
    // step is forced to recompute them from scratch.
    Jy_vie_.resize(0, 0);
    iteration_matrix_vie_ = {};
  }
void DoResetImplicitIntegratorStatistics() final;
static void ComputeAndFactorImplicitEulerIterationMatrix(
const MatrixX<T>& J, const T& h,
typename ImplicitIntegrator<T>::IterationMatrix* iteration_matrix);
void DoInitialize() final;
bool DoImplicitIntegratorStep(const T& h) final;
// Steps the system forward by a single step of h using the velocity-implicit
// Euler method.
// @param t0 the time at the left end of the integration interval.
// @param h the time increment to step forward.
// @param xn the continuous state at t0, which is xⁿ.
// @param xtplus_guess the starting guess for xⁿ⁺¹.
// @param [out] xtplus the computed value for xⁿ⁺¹ on successful return.
// @param [in, out] iteration_matrix the cached iteration matrix, which is
// updated if get_use_full_newton() is true, if get_reuse() is false,
// or if the Newton-Raphson fails to converge on the first try.
// @param [in, out] Jy the cached Jacobian Jₗ(y), which is updated if
// get_use_full_newton() is true, if get_reuse() is false, or if the
// Newton-Raphson fails to converge on the second try.
// @param trial the attempt for this approach (1-4).
// StepVelocityImplicitEuler() uses increasingly computationally
// expensive methods as the trial numbers increase.
// @returns `true` if the step of size `h` was successful, `false` otherwise.
// @note The time and continuous state in the context are indeterminate upon
// exit.
bool StepVelocityImplicitEuler(
const T& t0, const T& h, const VectorX<T>& xn,
const VectorX<T>& xtplus_guess, VectorX<T>* xtplus,
typename ImplicitIntegrator<T>::IterationMatrix* iteration_matrix,
MatrixX<T>* Jy, int trial = 1);
// Steps the system forward by two half-sized steps of size h/2 using the
// velocity-implicit Euler method, and keeps track of separate statistics
// for the derivative evaluations, matrix refactorizations, and Jacobian
// recomputations during these half-sized steps. This method calls
// StepVelocityImplicitEuler() up to twice to perform the
// two half-sized steps.
// @param t0 the time at the left end of the integration interval.
// @param h the combined time increment to step forward.
// @param xn the continuous state at t0, which is xⁿ.
// @param xtplus_guess the starting guess for xⁿ⁺¹.
// @param [out] xtplus the computed value for xⁿ⁺¹ on successful return.
  // @param [in, out] iteration_matrix the cached iteration matrix, which is
  //        updated if either of the StepVelocityImplicitEuler() calls
  //        updates it.
  // @param [in, out] Jy the cached Jacobian Jₗ(y), which is updated if
  //        either of the StepVelocityImplicitEuler() calls updates it.
// @returns `true` if both steps were successful, `false` otherwise.
// @note The time and continuous state in the context are indeterminate upon
// exit.
bool StepHalfVelocityImplicitEulers(
const T& t0, const T& h, const VectorX<T>& xn,
const VectorX<T>& xtplus_guess, VectorX<T>* xtplus,
typename ImplicitIntegrator<T>::IterationMatrix* iteration_matrix,
MatrixX<T>* Jy);
// Takes a large velocity-implicit Euler step (of size h) and two half-sized
// velocity-implicit Euler steps (of size h/2), if possible.
// @param t0 the time at the left end of the integration interval.
// @param h the integration step size to attempt.
// @param [out] xtplus_vie contains the velocity-implicit Euler solution
// (i.e., `xⁿ⁺¹`) after the large step, if successful, on return.
// @param [out] xtplus_hvie contains the velocity-implicit Euler solution
// (i.e., `xⁿ⁺¹`) after the two small steps, if successful, on
// return.
// @returns `true` if all three step attempts were successful, `false`
// otherwise.
bool AttemptStepPaired(const T& t0, const T& h, const VectorX<T>& xt0,
VectorX<T>* xtplus_vie, VectorX<T>* xtplus_hvie);
// Compute the partial derivatives of the ordinary differential equations with
// respect to the y variables of a given x(t). In particular, we compute the
// Jacobian, Jₗ(y), of the function ℓ(y), used in this integrator's
// residual computation, with respect to y, where y = (v,z) and x = (q,v,z).
// This Jacobian is then defined as:
// ℓ(y) = f_y(tⁿ⁺¹, qⁿ + h N(qₖ) v, y) (7)
// Jₗ(y) = ∂ℓ(y)/∂y (8)
// We use the Jacobian computation scheme from
// get_jacobian_computation_scheme(), which is either a first-order forward
// difference, a second-order centered difference, or automatic
// differentiation. See math::ComputeNumericalGradient() for more details on
// the first two methods.
// @param t refers to tⁿ⁺¹, the time used in the definition of ℓ(y)
// @param h is the time-step size parameter, h, used in the definition of
// ℓ(y)
// @param y is the generalized velocity and miscellaneous states around which
// to evaluate Jₗ(y).
// @param qk is qₖ, the current-iteration position used in the definition of
// ℓ(y).
// @param qn refers to qⁿ, the initial position used in ℓ(y)
// @param [out] Jy is the Jacobian matrix, Jₗ(y).
// @post The context's time will be set to t, and its continuous state will
// be indeterminate on return.
void CalcVelocityJacobian(const T& t, const T& h, const VectorX<T>& y,
const VectorX<T>& qk, const VectorX<T>& qn,
MatrixX<T>* Jy);
// Uses automatic differentiation to compute the Jacobian, Jₗ(y), of the
// function ℓ(y), used in this integrator's residual computation, with
// respect to y, where y = (v,z). This Jacobian is then defined as:
// ℓ(y) = f_y(tⁿ⁺¹, qⁿ + h N(qₖ) v, y) (7)
// Jₗ(y) = ∂ℓ(y)/∂y (8)
// In this method, we compute the Jacobian Jₗ(y) using automatic
// differentiation.
// @param t refers to tⁿ⁺¹, the time used in the definition of ℓ(y).
// @param h is the time-step size parameter, h, used in the definition of
// ℓ(y).
// @param y is the generalized velocity and miscellaneous states around which
// to evaluate Jₗ(y).
// @param qk is qₖ, the current-iteration position used in the definition of
// ℓ(y).
// @param qn refers to qⁿ, the initial position used in ℓ(y).
// @param [out] Jy is the Jacobian matrix, Jₗ(y).
// @note The context's time will be set to t, and its continuous state will
// be indeterminate on return.
void ComputeAutoDiffVelocityJacobian(const T& t, const T& h,
const VectorX<T>& y,
const VectorX<T>& qk,
const VectorX<T>& qn,
MatrixX<T>* Jy);
// Computes necessary matrices (Jacobian and iteration matrix) for
// Newton-Raphson (NR) iterations, as necessary. This method is based off of
// ImplicitIntegrator<T>::MaybeFreshenMatrices(). We implement our own version
// here to use a specialized Jacobian Jₗ(y). The aforementioned method was
// designed for use in DoImplicitIntegratorStep() processes that follow this
// model:
// 1. DoImplicitIntegratorStep(h) is called;
// 2. One or more NR iterations is performed until either (a) convergence is
// identified, (b) the iteration is found to diverge, or (c) too many
// iterations were taken. In the case of (a), DoImplicitIntegratorStep(h)
// will return success. Otherwise, the Newton-Raphson process is attempted
// again with (i) a recomputed and refactored iteration matrix and (ii) a
  //    recomputed Jacobian and a recomputed and refactored iteration matrix, in
// that order. The process stage of that NR algorithm is indicated by the
// `trial` parameter below. In this model, DoImplicitIntegratorStep()
// returns failure if the NR iterations reach a fourth trial.
//
// We provide our own method to execute the same logic, but with the
// following differences:
// 1. We use the specialized Jacobian Jₗ(y) instead of the full Jacobian.
// 2. We no longer use the get_reuse() logic to reuse a Jacobian
// when the time-step size (h) shrinks, because the specialized Jacobian
// Jₗ(y) depends on h.
// These changes allow the velocity-implicit Euler method to use the smaller
// specialized Jacobian Jₗ(y) in its Newton solves.
//
// @param t is the time at which to compute the Jacobian.
// @param y is the generalized velocity and miscellaneous states around which
// to evaluate Jₗ(y).
// @param qk is qₖ, the current-iteration position used in the definition of
// ℓ(y), which is used in the definition of Jₗ(y).
// @param qn is the generalized position at the beginning of the step.
// @param h is the integration step size.
// @param trial specifies which trial (1-4) the Newton-Raphson process is in
// when calling this method.
// @param compute_and_factor_iteration_matrix is a function pointer for
// computing and factoring the iteration matrix.
// @param [in, out] iteration_matrix is the updated and factored iteration
// matrix on return.
// @param [in, out] Jy is the updated and factored Jacobian matrix Jₗ(y) on
// return.
// @returns `false` if the calling stepping method should indicate failure;
// `true` otherwise.
// @pre 1 <= `trial` <= 4.
// @post The internal context may or may not be altered on return; if
// altered, the time will be set to t and the continuous state will be
// indeterminate.
bool MaybeFreshenVelocityMatrices(
const T& t, const VectorX<T>& y, const VectorX<T>& qk,
const VectorX<T>& qn, const T& h, int trial,
const std::function<
void(const MatrixX<T>& J, const T& h,
typename ImplicitIntegrator<T>::IterationMatrix*)>&
compute_and_factor_iteration_matrix,
typename ImplicitIntegrator<T>::IterationMatrix* iteration_matrix,
MatrixX<T>* Jy);
// Computes necessary matrices (Jacobian and iteration matrix) for full
// Newton-Raphson (NR) iterations, if full Newton-Raphson method is activated
// (if it's not activated, this method is a no-op).
// @param t the time at which to compute the Jacobian.
// @param y is the generalized velocity and miscellaneous states around which
// to evaluate Jₗ(y).
// @param qk is qₖ, the current-iteration position used in the definition of
// ℓ(y), which is used in the definition of Jₗ(y).
// @param qn is qⁿ, the generalized position at the beginning of the step.
// @param h the integration step size (for computing iteration matrices).
// @param compute_and_factor_iteration_matrix a function pointer for
// computing and factoring the iteration matrix.
// @param[out] iteration_matrix the updated and factored iteration matrix on
// return.
// @param[out] Jy the updated Jacobian matrix Jₗ(y).
// @post The internal context may or may not be altered on return; if
// altered, the time will be set to t and the continuous state will be
// indeterminate.
void FreshenVelocityMatricesIfFullNewton(
const T& t, const VectorX<T>& y, const VectorX<T>& qk,
const VectorX<T>& qn, const T& h,
const std::function<
void(const MatrixX<T>& J, const T& h,
typename ImplicitIntegrator<T>::IterationMatrix*)>&
compute_and_factor_iteration_matrix,
typename ImplicitIntegrator<T>::IterationMatrix* iteration_matrix,
MatrixX<T>* Jy);
// This helper method evaluates the Newton-Raphson residual R(y), defined as
// the following:
// R(y) = y - yⁿ - h ℓ(y),
// ℓ(y) = f_y(tⁿ⁺¹, qⁿ + h N(qₖ) v, y), (7)
// with tⁿ⁺¹, y = (v, z), qₖ, qⁿ, yⁿ, and h passed in.
// @param t refers to tⁿ⁺¹, the time at which to compute the residual R(y).
// @param y is the generalized velocity and miscellaneous states around which
// to evaluate R(y).
// @param qk is qₖ, the current-iteration position used in the definition of
// ℓ(y).
// @param qn is qⁿ, the generalized position at the beginning of the step.
  // @param yn is yⁿ, the generalized velocity and miscellaneous states at the
  //   beginning of the step.
// @param h is the step size.
// @param [in, out] qdot is a temporary BasicVector<T> of the same size as qⁿ
// allocated by the caller so that this method avoids unnecessary heap
// allocations. Its value is indeterminate upon return.
// @param [out] result is set to R(y).
// @post The context is set to (tⁿ⁺¹, qⁿ + h N(qₖ) v, y).
VectorX<T> ComputeResidualR(const T& t, const VectorX<T>& y,
const VectorX<T>& qk, const VectorX<T>& qn,
const VectorX<T>& yn, const T& h,
BasicVector<T>* qdot);
// This helper method evaluates ℓ(y), defined as the following:
// ℓ(y) = f_y(tⁿ⁺¹, qⁿ + h N(qₖ) v, y), (7)
// with tⁿ⁺¹, y = (v, z), qₖ, qⁿ, yⁿ, and h passed in.
// @param t refers to tⁿ⁺¹, the time at which to compute the residual R(y).
// @param y is the generalized velocity and miscellaneous states around which
// to evaluate ℓ(y).
// @param qk is qₖ, the current-iteration position used in the definition of
// ℓ(y).
// @param qn is qⁿ, the generalized position at the beginning of the step.
// @param h is the step size.
// @param [in, out] qdot is a temporary BasicVector<T> of the same size as qⁿ
// allocated by the caller so that this method avoids unnecessary heap
// allocations. Its value is indeterminate upon return.
  // @returns ℓ(y).
// @post The context is set to (tⁿ⁺¹, qⁿ + h N(qₖ) v, y).
  VectorX<T> ComputeLOfY(const T& t, const VectorX<T>& y, const VectorX<T>& qk,
                         const VectorX<T>& qn, const T& h,
                         BasicVector<T>* qdot) {
    // Forwards to the scalar-templated overload below with U = T, supplying
    // this integrator's own System and mutable Context for the evaluation.
    return this->ComputeLOfY(t, y, qk, qn, h, qdot, this->get_system(),
                             this->get_mutable_context());
  }
// This helper method evaluates ℓ(y), defined as the following:
// ℓ(y) = f_y(tⁿ⁺¹, qⁿ + h N(qₖ) v, y), (7)
// with tⁿ⁺¹, y = (v, z), qₖ, qⁿ, yⁿ, and h passed in, for a system that can
// use scalar type U, which is either a double or an AutoDiffXd scalar type.
// If type U is different from T, then y, qdot, and context must also use
// the U scalar type. This version of the method exists to allow the
// VelocityImplicitEulerIntegrator to include AutoDiff'd systems in ℓ(y)
// evaluations, which is necessary when computing an AutoDiff'd velocity
// Jacobian; in particular, this version is explicitly called with type
// U=AutoDiffXd in ComputeAutoDiffVelocityJacobian().
// @param t refers to tⁿ⁺¹, the time at which to compute the residual R(y).
// @param y is the generalized velocity and miscellaneous states around which
// to evaluate ℓ(y); it uses the scalar type U.
// @param qk is qₖ, the current-iteration position used in the definition of
// ℓ(y).
// @param qn is qⁿ, the generalized position at the beginning of the step.
// @param h is the step size.
// @param [in, out] qdot is a temporary BasicVector<U> of the same size as qⁿ
// allocated by the caller so that this method avoids unnecessary heap
// allocations. Its value is indeterminate upon return.
// @param [in] system defines f_y() so that we can evaluate f_y() with the
// U scalar type.
// @param [in, out] context to pass in the time and continuous states when
// evaluating f_y(); its scalar type must also be U.
  // @returns ℓ(y).
// @post context is set to (tⁿ⁺¹, qⁿ + h N(qₖ) v, y).
template <typename U>
VectorX<U> ComputeLOfY(const T& t, const VectorX<U>& y, const VectorX<T>& qk,
const VectorX<T>& qn, const T& h,
BasicVector<U>* qdot, const System<U>& system,
Context<U>* context);
// The last computed iteration matrix and factorization.
typename ImplicitIntegrator<T>::IterationMatrix iteration_matrix_vie_;
// Vector used in error estimate calculations. At the end of every step, we
// set this to ε* = x̅ⁿ⁺¹ - x̃ⁿ⁺¹, which is our estimate for ε = x̃ⁿ⁺¹ - xⁿ⁺¹,
// the error of the propagated half-sized steps.
VectorX<T> err_est_vec_;
// The continuous state update vector used during Newton-Raphson.
std::unique_ptr<ContinuousState<T>> dx_state_;
// Variables to avoid heap allocations.
VectorX<T> xn_, xdot_, xtplus_vie_, xtplus_hvie_;
std::unique_ptr<BasicVector<T>> qdot_;
// The following will help avoid repeated heap allocations when computing a
// velocity Jacobian using automatic differentiation.
std::unique_ptr<System<AutoDiffXd>> system_ad_;
std::unique_ptr<Context<AutoDiffXd>> context_ad_;
std::unique_ptr<BasicVector<AutoDiffXd>> qdot_ad_;
// The last computed velocity+misc Jacobian matrix.
MatrixX<T> Jy_vie_;
// Various statistics.
int64_t num_nr_iterations_{0};
// Half-sized-step-specific statistics, only updated when taking the half-
// sized steps.
int64_t num_half_vie_jacobian_reforms_{0};
int64_t num_half_vie_iter_factorizations_{0};
int64_t num_half_vie_function_evaluations_{0};
int64_t num_half_vie_jacobian_function_evaluations_{0};
int64_t num_half_vie_nr_iterations_{0};
};
// We do not support computing the Velocity Jacobian matrix using automatic
// differentiation when the scalar is already an AutoDiff type, i.e., nesting
// AutoDiff within AutoDiff is not supported.
// Note: must be declared inline because it's specialized and located in the
// header file (to avoid multiple definition errors).
template <>
inline void VelocityImplicitEulerIntegrator<AutoDiffXd>::
    ComputeAutoDiffVelocityJacobian(const AutoDiffXd&, const AutoDiffXd&,
                                    const VectorX<AutoDiffXd>&,
                                    const VectorX<AutoDiffXd>&,
                                    const VectorX<AutoDiffXd>&,
                                    MatrixX<AutoDiffXd>*) {
  // Unconditionally reject the request; an AutoDiffXd-scalared integrator
  // must use a different Jacobian computation scheme.
  throw std::runtime_error("AutoDiff'd Jacobian not supported for "
                           "AutoDiff'd VelocityImplicitEulerIntegrator");
}
} // namespace systems
} // namespace drake
DRAKE_DECLARE_CLASS_TEMPLATE_INSTANTIATIONS_ON_DEFAULT_NONSYMBOLIC_SCALARS(
class ::drake::systems::VelocityImplicitEulerIntegrator)
| 0 |
/home/johnshepherd/drake/systems | /home/johnshepherd/drake/systems/analysis/BUILD.bazel | load("//tools/lint:lint.bzl", "add_lint_tests")
load(
"//tools/skylark:drake_cc.bzl",
"drake_cc_binary",
"drake_cc_googletest",
"drake_cc_library",
"drake_cc_package_library",
)
load(
"//tools/skylark:drake_py.bzl",
"drake_py_unittest",
)
package(default_visibility = ["//visibility:public"])
# Aggregates every installed library in this package into the single
# //systems/analysis umbrella target.
drake_cc_package_library(
    name = "analysis",
    visibility = ["//visibility:public"],
    deps = [
        ":antiderivative_function",
        ":batch_eval",
        ":bogacki_shampine3_integrator",
        ":dense_output",
        ":explicit_euler_integrator",
        ":hermitian_dense_output",
        ":implicit_euler_integrator",
        ":implicit_integrator",
        ":initial_value_problem",
        ":instantaneous_realtime_rate_calculator",
        ":integrator_base",
        ":lyapunov",
        ":monte_carlo",
        ":radau_integrator",
        ":region_of_attraction",
        ":runge_kutta2_integrator",
        ":runge_kutta3_integrator",
        ":runge_kutta5_integrator",
        ":scalar_dense_output",
        ":scalar_initial_value_problem",
        ":scalar_view_dense_output",
        ":semi_explicit_euler_integrator",
        ":simulator",
        ":simulator_config",
        ":simulator_config_functions",
        ":simulator_print_stats",
        ":simulator_status",
        ":stepwise_dense_output",
        ":velocity_implicit_euler_integrator",
    ],
)
drake_cc_library(
name = "simulator_gflags",
srcs = ["simulator_gflags.cc"],
hdrs = ["simulator_gflags.h"],
tags = [
# Don't add this library into the ":analysis" package library.
# Only programs with a main() function should ever use this header.
"exclude_from_package",
# Don't install this header via this cc_library, because that would
# introduce a dependency from libdrake onto gflags.
"exclude_from_libdrake",
],
visibility = ["//:__subpackages__"],
deps = [
":simulator_config",
":simulator_config_functions",
"@gflags",
],
)
drake_cc_library(
name = "simulator_config",
srcs = ["simulator_config.cc"],
hdrs = ["simulator_config.h"],
deps = [
"//common:name_value",
],
)
drake_cc_library(
name = "simulator_config_functions",
srcs = ["simulator_config_functions.cc"],
hdrs = ["simulator_config_functions.h"],
deps = [
":bogacki_shampine3_integrator",
":explicit_euler_integrator",
":implicit_euler_integrator",
":radau_integrator",
":runge_kutta2_integrator",
":runge_kutta3_integrator",
":runge_kutta5_integrator",
":semi_explicit_euler_integrator",
":simulator",
":simulator_config",
":velocity_implicit_euler_integrator",
"//common:default_scalars",
"//common:essential",
"//common:nice_type_name",
"//systems/framework:leaf_system",
],
)
drake_cc_library(
name = "bogacki_shampine3_integrator",
srcs = ["bogacki_shampine3_integrator.cc"],
hdrs = ["bogacki_shampine3_integrator.h"],
deps = [
":integrator_base",
],
)
drake_cc_library(
name = "dense_output",
srcs = ["dense_output.cc"],
hdrs = ["dense_output.h"],
deps = [
"//common:default_scalars",
"//common:essential",
"@fmt",
],
)
drake_cc_library(
name = "stepwise_dense_output",
srcs = ["stepwise_dense_output.cc"],
hdrs = ["stepwise_dense_output.h"],
deps = [
":dense_output",
"//common:default_scalars",
"//common:essential",
],
)
drake_cc_library(
name = "hermitian_dense_output",
srcs = ["hermitian_dense_output.cc"],
hdrs = ["hermitian_dense_output.h"],
deps = [
":stepwise_dense_output",
"//common:default_scalars",
"//common:essential",
"//common:extract_double",
"//common/trajectories:piecewise_polynomial",
"//systems/framework:vector",
],
)
drake_cc_library(
name = "scalar_dense_output",
srcs = ["scalar_dense_output.cc"],
hdrs = ["scalar_dense_output.h"],
deps = [
":dense_output",
"//common:default_scalars",
"//common:essential",
],
)
drake_cc_library(
name = "scalar_view_dense_output",
srcs = ["scalar_view_dense_output.cc"],
hdrs = ["scalar_view_dense_output.h"],
deps = [
":dense_output",
":scalar_dense_output",
"//common:default_scalars",
"//common:essential",
"@fmt",
],
)
drake_cc_library(
name = "integrator_base",
srcs = ["integrator_base.cc"],
hdrs = ["integrator_base.h"],
deps = [
"//common:default_scalars",
"//common/trajectories:piecewise_polynomial",
"//systems/framework:context",
"//systems/framework:system",
],
)
drake_cc_library(
name = "explicit_euler_integrator",
srcs = ["explicit_euler_integrator.cc"],
hdrs = ["explicit_euler_integrator.h"],
deps = [
":integrator_base",
],
)
drake_cc_library(
name = "lyapunov",
srcs = ["lyapunov.cc"],
hdrs = ["lyapunov.h"],
deps = [
"//common:essential",
"//math:autodiff",
"//math:gradient",
"//solvers:mathematical_program",
"//solvers:solve",
"//systems/framework",
],
)
drake_cc_library(
name = "monte_carlo",
srcs = ["monte_carlo.cc"],
hdrs = ["monte_carlo.h"],
deps = [
":simulator",
"//common:parallelism",
"//systems/framework",
],
)
drake_cc_library(
name = "region_of_attraction",
srcs = ["region_of_attraction.cc"],
hdrs = ["region_of_attraction.h"],
deps = [
"//common:name_value",
"//math:continuous_lyapunov_equation",
"//math:matrix_util",
"//math:quadratic_form",
"//solvers:choose_best_solver",
"//solvers:mathematical_program",
"//solvers:solve",
"//systems/framework",
"//systems/primitives:linear_system",
],
)
drake_cc_library(
name = "runge_kutta2_integrator",
srcs = ["runge_kutta2_integrator.cc"],
hdrs = ["runge_kutta2_integrator.h"],
deps = [
":integrator_base",
],
)
drake_cc_library(
name = "runge_kutta3_integrator",
srcs = ["runge_kutta3_integrator.cc"],
hdrs = [
"runge_kutta3_integrator.h",
],
deps = [
":integrator_base",
":runge_kutta2_integrator",
],
)
drake_cc_library(
name = "runge_kutta5_integrator",
srcs = ["runge_kutta5_integrator.cc"],
hdrs = ["runge_kutta5_integrator.h"],
deps = [
":integrator_base",
],
)
drake_cc_library(
name = "implicit_euler_integrator",
srcs = ["implicit_euler_integrator.cc"],
hdrs = [
"implicit_euler_integrator.h",
],
deps = [
":implicit_integrator",
":runge_kutta2_integrator",
"//math:gradient",
],
)
drake_cc_library(
name = "velocity_implicit_euler_integrator",
srcs = ["velocity_implicit_euler_integrator.cc"],
hdrs = [
"velocity_implicit_euler_integrator.h",
],
deps = [
":implicit_integrator",
"//math:compute_numerical_gradient",
],
)
drake_cc_library(
name = "implicit_integrator",
srcs = ["implicit_integrator.cc"],
hdrs = [
"implicit_integrator.h",
],
deps = [
":integrator_base",
"//math:gradient",
],
)
drake_cc_library(
name = "instantaneous_realtime_rate_calculator",
srcs = ["instantaneous_realtime_rate_calculator.cc"],
hdrs = [
"instantaneous_realtime_rate_calculator.h",
],
deps = [
"//common:timer",
],
)
drake_cc_library(
name = "radau_integrator",
srcs = ["radau_integrator.cc"],
hdrs = [
"radau_integrator.h",
],
deps = [
":bogacki_shampine3_integrator",
":implicit_integrator",
":runge_kutta2_integrator",
],
)
drake_cc_library(
name = "semi_explicit_euler_integrator",
srcs = ["semi_explicit_euler_integrator.cc"],
hdrs = ["semi_explicit_euler_integrator.h"],
deps = [
":integrator_base",
],
)
drake_cc_library(
name = "initial_value_problem",
srcs = [
"initial_value_problem.cc",
],
hdrs = [
"initial_value_problem.h",
],
deps = [
":dense_output",
":hermitian_dense_output",
":integrator_base",
":runge_kutta3_integrator",
"//common:default_scalars",
"//systems/framework:context",
"//systems/framework:continuous_state",
"//systems/framework:leaf_system",
"//systems/framework:parameters",
"//systems/framework:vector",
],
)
drake_cc_library(
name = "scalar_initial_value_problem",
srcs = [
"scalar_initial_value_problem.cc",
],
hdrs = [
"scalar_initial_value_problem.h",
],
deps = [
":initial_value_problem",
":scalar_dense_output",
":scalar_view_dense_output",
"//common:default_scalars",
"//common:essential",
"//common:unused",
],
)
drake_cc_library(
name = "antiderivative_function",
srcs = [
"antiderivative_function.cc",
],
hdrs = [
"antiderivative_function.h",
],
deps = [
":scalar_dense_output",
":scalar_initial_value_problem",
"//common:default_scalars",
"//common:essential",
"//common:unused",
],
)
drake_cc_library(
name = "batch_eval",
srcs = [
"batch_eval.cc",
],
hdrs = [
"batch_eval.h",
],
interface_deps = [
"//common:essential",
"//common:parallelism",
"//systems/framework:system",
],
deps = [
"@common_robotics_utilities",
],
)
drake_cc_library(
name = "simulator_print_stats",
srcs = ["simulator_print_stats.cc"],
hdrs = ["simulator_print_stats.h"],
deps = [
":implicit_integrator",
":integrator_base",
":simulator",
"@fmt",
],
)
drake_cc_library(
name = "simulator_status",
srcs = ["simulator_status.cc"],
hdrs = ["simulator_status.h"],
deps = [
"//systems/framework:system_base",
"@fmt",
],
)
# N.B. This library does not have all of its dependencies declared. Instead,
# it defines only the headers such that it can be used by `pydrake` without
# installing the file. (If we just used `install_hdrs_exclude`, the header
# would not make it into `//:drake_shared_library`.)
drake_cc_library(
name = "simulator_python_internal_header",
hdrs = ["simulator_python_internal.h"],
install_hdrs_exclude = ["simulator_python_internal.h"],
tags = ["exclude_from_package"],
visibility = ["//bindings/pydrake/systems:__pkg__"],
)
drake_cc_library(
name = "simulator",
srcs = ["simulator.cc"],
hdrs = ["simulator.h"],
interface_deps = [
":integrator_base",
":simulator_config",
":simulator_status",
"//common:extract_double",
"//common:name_value",
"//systems/framework:context",
"//systems/framework:system",
],
deps = [
":runge_kutta3_integrator",
":simulator_python_internal_header",
],
)
# === test/ ===
drake_cc_googletest(
name = "simulator_config_functions_test",
deps = [
":simulator",
":simulator_config_functions",
"//common:nice_type_name",
"//common/test_utilities:expect_no_throw",
"//common/yaml",
"//systems/framework:leaf_system",
"//systems/primitives:constant_vector_source",
],
)
drake_cc_googletest(
name = "simulator_status_test",
deps = [
":simulator_status",
"//common:nice_type_name",
],
)
drake_cc_googletest(
name = "simulator_test",
deps = [
":explicit_euler_integrator",
":implicit_euler_integrator",
":runge_kutta3_integrator",
":simulator",
"//common/test_utilities:expect_throws_message",
"//common/test_utilities:is_dynamic_castable",
"//systems/analysis/test_utilities:controlled_spring_mass_system",
"//systems/analysis/test_utilities:logistic_system",
"//systems/analysis/test_utilities:my_spring_mass_system",
"//systems/analysis/test_utilities:stateless_system",
"//systems/primitives:constant_vector_source",
"//systems/primitives:integrator",
"//systems/primitives:vector_log_sink",
],
)
drake_cc_googletest(
name = "simulator_limit_malloc_test",
deps = [
":simulator",
"//common/test_utilities:limit_malloc",
"//systems/framework:diagram_builder",
"//systems/framework:leaf_system",
],
)
drake_cc_googletest(
name = "simulator_denorm_test",
# Valgrind core does not support the floating point register
# instructions that this test uses.
tags = ["no_valgrind_tools"],
deps = [
":simulator",
],
)
drake_cc_googletest(
name = "simulator_print_stats_test",
deps = [
":simulator_print_stats",
"//systems/primitives:constant_vector_source",
],
)
drake_cc_googletest(
name = "simulator_python_internal_test",
deps = [
":simulator",
":simulator_python_internal_header",
"//systems/primitives:constant_vector_source",
],
)
drake_cc_binary(
name = "simulator_gflags_main",
testonly = True,
srcs = ["test/simulator_gflags_main.cc"],
deps = [
":simulator_config_functions",
":simulator_gflags",
"//common/yaml",
"//systems/primitives:constant_vector_source",
],
)
drake_py_unittest(
name = "simulator_gflags_test",
data = [
":simulator_gflags_main",
],
deps = [
"@rules_python//python/runfiles",
],
)
drake_cc_googletest(
name = "batch_eval_test",
    # This test launches 2 threads to test both serial and parallel code paths
    # in the BatchEval functions.
tags = ["cpu:2"],
deps = [
":batch_eval",
"//common/test_utilities:eigen_matrix_compare",
"//systems/primitives:linear_system",
"//systems/primitives:symbolic_vector_system",
],
)
drake_cc_googletest(
name = "bogacki_shampine3_integrator_test",
# If necessary, increase test timeout to 'moderate' when run with Valgrind
# and/or disable in debug mode.
deps = [
":bogacki_shampine3_integrator",
":runge_kutta2_integrator",
"//common/test_utilities:eigen_matrix_compare",
"//math:geometric_transform",
"//multibody/plant",
"//systems/analysis/test_utilities",
],
)
drake_cc_googletest(
name = "explicit_euler_integrator_test",
timeout = "moderate",
deps = [
":explicit_euler_integrator",
"//common/test_utilities:expect_no_throw",
"//systems/analysis/test_utilities",
],
)
drake_cc_googletest(
name = "implicit_euler_integrator_test",
timeout = "moderate",
shard_count = 2,
# Adding cache invalidation made this take too long with Valgrind.
# See Drake issue #9261.
tags = [
"no_memcheck",
],
deps = [
":implicit_euler_integrator",
"//common/test_utilities:expect_throws_message",
"//systems/analysis/test_utilities:implicit_integrator_test",
"//systems/analysis/test_utilities:quadratic_scalar_system",
"//systems/analysis/test_utilities:spring_mass_system",
],
)
drake_cc_googletest(
name = "implicit_integrator_test",
deps = [
":implicit_integrator",
"//common/test_utilities:expect_no_throw",
"//systems/analysis/test_utilities:spring_mass_system",
],
)
drake_cc_googletest(
name = "integrator_base_test",
deps = [
":integrator_base",
"//common/test_utilities:expect_throws_message",
"//systems/analysis/test_utilities:spring_mass_system",
],
)
drake_cc_googletest(
name = "instantaneous_realtime_rate_calculator_test",
deps = [
":instantaneous_realtime_rate_calculator",
],
)
drake_cc_googletest(
name = "radau_integrator_test",
# Note: if memcheck takes too long with Valgrind, disable
# memcheck (per Drake issue #9621).
timeout = "moderate",
shard_count = 2,
deps = [
":implicit_euler_integrator",
":radau_integrator",
"//systems/analysis/test_utilities:cubic_scalar_system",
"//systems/analysis/test_utilities:implicit_integrator_test",
"//systems/analysis/test_utilities:linear_scalar_system",
"//systems/analysis/test_utilities:quadratic_scalar_system",
"//systems/analysis/test_utilities:spring_mass_system",
],
)
drake_cc_googletest(
name = "lyapunov_test",
deps = [
":lyapunov",
"//examples/pendulum:pendulum_plant",
],
)
drake_cc_googletest(
name = "monte_carlo_test",
# This test launches 2 threads to test both serial and parallel code paths
# in MonteCarloSimulation.
tags = ["cpu:2"],
deps = [
":monte_carlo",
"//systems/primitives:constant_vector_source",
"//systems/primitives:pass_through",
"//systems/primitives:random_source",
],
)
drake_cc_googletest(
name = "region_of_attraction_test",
deps = [
":region_of_attraction",
"//solvers:csdp_solver",
"//solvers:mosek_solver",
"//systems/framework:diagram_builder",
"//systems/primitives:constant_vector_source",
"//systems/primitives:symbolic_vector_system",
],
)
drake_cc_googletest(
name = "runge_kutta2_integrator_test",
deps = [
":runge_kutta2_integrator",
"//common/test_utilities:eigen_matrix_compare",
"//systems/analysis/test_utilities",
],
)
drake_cc_googletest(
name = "runge_kutta3_integrator_test",
# Test timeout increased to not timeout when run with Valgrind.
timeout = "moderate",
# This test is prohibitively slow with --compilation_mode=dbg.
disable_in_compilation_mode_dbg = True,
deps = [
":runge_kutta3_integrator",
"//common/test_utilities:eigen_matrix_compare",
"//math:geometric_transform",
"//multibody/plant",
"//systems/analysis/test_utilities",
],
)
drake_cc_googletest(
name = "runge_kutta5_integrator_test",
# Test timeout increased to not timeout when run with Valgrind.
timeout = "moderate",
# This test is prohibitively slow with --compilation_mode=dbg.
disable_in_compilation_mode_dbg = True,
deps = [
":runge_kutta5_integrator",
"//common/test_utilities:eigen_matrix_compare",
"//math:geometric_transform",
"//multibody/plant",
"//systems/analysis/test_utilities",
],
)
drake_cc_googletest(
name = "semi_explicit_euler_integrator_test",
timeout = "moderate",
deps = [
":explicit_euler_integrator",
":semi_explicit_euler_integrator",
"//multibody/plant",
"//systems/analysis/test_utilities",
],
)
drake_cc_googletest(
name = "velocity_implicit_euler_integrator_test",
# Note: if memcheck takes too long with Valgrind, disable
# memcheck (per Drake issue #9621).
timeout = "moderate",
shard_count = 2,
deps = [
":velocity_implicit_euler_integrator",
"//systems/analysis/test_utilities:implicit_integrator_test",
"//systems/analysis/test_utilities:quadratic_scalar_system",
],
)
drake_cc_googletest(
name = "initial_value_problem_test",
timeout = "moderate",
deps = [
":initial_value_problem",
":runge_kutta2_integrator",
"//common/test_utilities:eigen_matrix_compare",
"//common/test_utilities:expect_throws_message",
],
)
drake_cc_googletest(
name = "scalar_initial_value_problem_test",
deps = [
":runge_kutta2_integrator",
":scalar_initial_value_problem",
"//common/test_utilities:expect_throws_message",
],
)
drake_cc_googletest(
name = "antiderivative_function_test",
deps = [
":antiderivative_function",
":runge_kutta2_integrator",
"//common/test_utilities:expect_throws_message",
],
)
drake_cc_googletest(
name = "hermitian_dense_output_test",
deps = [
":hermitian_dense_output",
"//common:autodiff",
"//common:essential",
"//common/test_utilities:eigen_matrix_compare",
"//common/test_utilities:expect_no_throw",
"//common/test_utilities:expect_throws_message",
"//common/trajectories:piecewise_polynomial",
],
)
drake_cc_googletest(
name = "scalar_view_dense_output_test",
deps = [
":hermitian_dense_output",
":scalar_view_dense_output",
"//common:autodiff",
"//common:essential",
"//common/test_utilities:expect_throws_message",
],
)
add_lint_tests(enable_clang_format_lint = False)
| 0 |
/home/johnshepherd/drake/systems | /home/johnshepherd/drake/systems/analysis/semi_explicit_euler_integrator.cc | #include "drake/systems/analysis/semi_explicit_euler_integrator.h"
DRAKE_DEFINE_CLASS_TEMPLATE_INSTANTIATIONS_ON_DEFAULT_NONSYMBOLIC_SCALARS(
class drake::systems::SemiExplicitEulerIntegrator)
| 0 |
/home/johnshepherd/drake/systems | /home/johnshepherd/drake/systems/analysis/batch_eval.h | #pragma once
#include <variant>
#include <vector>
#include "drake/common/parallelism.h"
#include "drake/systems/framework/system.h"
namespace drake {
namespace systems {
/** Evaluates the dynamics of a difference equation `system` at many times,
states, and inputs. See System<T>::EvalUniquePeriodicDiscreteUpdate().
Each column of `times`, `states`, and `inputs` will be associated with a single
evaluation of the dynamics. The return value will be a matrix with each column
corresponding to the next state of the system evaluated `num_time_steps *
time_step` seconds after the provided time, using the `time_step` that is
reported by System<T>::IsDifferenceEquationSystem().
@tparam T The scalar type of the system.
@param system The system to evaluate.
@param context A context associated with `system`, which can be used to pass
system parameters.
@param times A 1 x N vector of times at which to evaluate the dynamics.
@param states A num_states x N matrix of states at which to evaluate the
dynamics.
@param inputs A num_inputs x N matrix of inputs at which to evaluate the
dynamics, where num_inputs must match the size of the input port selected. If
input_port_index is set to InputPortSelection::kNoInput, then the inputs
argument will be ignored.
@param num_time_steps The returned value will be the state at `time +
time_step*num_time_steps`.
@param input_port_index The input port index to use for evaluating the
dynamics. The default is to use the first input if there is one. A specific
port index or kNoInput can be specified instead. The input port must be
vector-valued and have the same size as the number of rows in `inputs`.
@param parallelize The parallelism to use for evaluating the dynamics.
@return A matrix with each column corresponding to the next state at `time +
num_time_steps * time_step`.
@throws std::exception if `system.IsDifferenceEquationSystem()` is not true.
@throws std::exception if matrix shapes are inconsistent, with inputs required
only if an input port is provided.
*/
template <typename T>
MatrixX<T> BatchEvalUniquePeriodicDiscreteUpdate(
const System<T>& system,
const Context<T>& context,
const Eigen::Ref<const RowVectorX<T>>& times,
const Eigen::Ref<const MatrixX<T>>& states,
const Eigen::Ref<const MatrixX<T>>& inputs, int num_time_steps = 1,
std::variant<InputPortSelection, InputPortIndex> input_port_index =
InputPortSelection::kUseFirstInputIfItExists,
Parallelism parallelize = Parallelism::Max());
/** Evaluates the time derivatives of a `system` at many times, states, and
inputs.
Each column of `times`, `states`, and `inputs` will be associated with a single
evaluation of the dynamics. The return value will be a matrix with the
corresponding time derivatives in each column. Any discrete variables in
`context` will be held constant across all evaluations.
@tparam T The scalar type of the system.
@param system The system to evaluate.
@param context A context associated with `system`, which can be used to pass
system parameters and discrete/abstract state.
@param times A 1 x N vector of times at which to evaluate the dynamics.
@param states A system.num_continuous_states() x N matrix of continuous states
at which to evaluate the dynamics.
@param inputs A num_inputs x N matrix of inputs at which to evaluate the
dynamics, where num_inputs must match the size of the input port selected. If
input_port_index is set to InputPortSelection::kNoInput, then the inputs
argument will be ignored.
@param input_port_index The input port index to use for evaluating the
dynamics. The default is to use the first input if there is one. A specific
port index or kNoInput can be specified instead. The input port must be
vector-valued and have the same size as the number of rows in `inputs`.
@param parallelize The parallelism to use for evaluating the dynamics.
@return A matrix with each column corresponding to the time derivatives.
@throws std::exception if matrix shapes are inconsistent, with inputs required
only if an input port is provided.
*/
template <typename T>
MatrixX<T> BatchEvalTimeDerivatives(
const System<T>& system,
const Context<T>& context,
const Eigen::Ref<const RowVectorX<T>>& times,
const Eigen::Ref<const MatrixX<T>>& states,
const Eigen::Ref<const MatrixX<T>>& inputs,
std::variant<InputPortSelection, InputPortIndex> input_port_index =
InputPortSelection::kUseFirstInputIfItExists,
Parallelism parallelize = Parallelism::Max());
} // namespace systems
} // namespace drake
| 0 |
/home/johnshepherd/drake/systems | /home/johnshepherd/drake/systems/analysis/initial_value_problem.h | #pragma once
#include <memory>
#include <optional>
#include <utility>
#include "drake/common/default_scalars.h"
#include "drake/common/drake_assert.h"
#include "drake/common/drake_copyable.h"
#include "drake/common/eigen_types.h"
#include "drake/systems/analysis/dense_output.h"
#include "drake/systems/analysis/integrator_base.h"
#include "drake/systems/framework/context.h"
namespace drake {
namespace systems {
/// A general initial value problem (or IVP) representation class, that allows
/// evaluating the 𝐱(t; 𝐤) solution function to the given ODE
/// d𝐱/dt = f(t, 𝐱; 𝐤), where f : t ⨯ 𝐱 → ℝⁿ, t ∈ ℝ, 𝐱 ∈ ℝⁿ, 𝐤 ∈ ℝᵐ,
/// provided an initial condition 𝐱(t₀; 𝐤) = 𝐱₀. The parameter vector 𝐤
/// allows for generic IVP definitions, which can later be solved for any
/// instance of said vector.
///
/// By default, an explicit 3rd order RungeKutta integration scheme is used.
///
/// The implementation of this class performs basic computation caching,
/// optimizing away repeated integration whenever the IVP is solved for
/// increasing values of time t while both initial conditions and parameters
/// are kept constant, e.g. if solved for t₁ > t₀ first, solving for t₂ > t₁
/// will only require integrating from t₁ onward.
///
/// Additionally, IntegratorBase's dense output support can be leveraged to
/// efficiently approximate the IVP solution within closed intervals of t.
/// This is convenient when there's a need for a more dense sampling of the
/// IVP solution than what would be available through either fixed or
/// error-controlled step integration (for a given accuracy), or when the IVP
/// is to be solved repeatedly for arbitrarily many t values within a given
/// interval. See documentation of the internally held IntegratorBase subclass
/// instance (either the default or a user-defined one, set via
/// reset_integrator()) for further reference on the specific dense output
/// technique in use.
///
/// For further insight into its use, consider the following examples:
///
/// - The momentum 𝐩 of a particle of mass m that is traveling through a
/// volume of a gas with dynamic viscosity μ can be described by
/// d𝐩/dt = -μ * 𝐩/m. At time t₀, the particle carries an initial momentum
/// 𝐩₀. In this context, t is unused (the ODE is autonomous), 𝐱 ≜ 𝐩,
/// 𝐤 ≜ [m, μ], t₀ = 0, 𝐱₀ ≜ 𝐩₀, d𝐱/dt = f(t, 𝐱; 𝐤) = -k₂ * 𝐱 / k₁.
///
/// - The velocity 𝐯 of the same particle in the same exact conditions as
/// before, but when a time varying force 𝐅(t) is applied to it, can be
/// be described by d𝐯/dt = (𝐅(t) - μ * 𝐯) / m. In this context, 𝐱 ≜ 𝐯,
/// 𝐤 ≜ [m, μ], 𝐱₀ ≜ 𝐯₀, d𝐱/dt = f(t, 𝐱; 𝐤) = (𝐅(t) - k₂ * 𝐱) / k₁.
///
/// @tparam_nonsymbolic_scalar
template <typename T>
class InitialValueProblem {
 public:
  DRAKE_NO_COPY_NO_MOVE_NO_ASSIGN(InitialValueProblem);
  /// Default integration accuracy in the relative tolerance sense.
  static const double kDefaultAccuracy;
  /// Default initial integration step size.
  static const T kInitialStepSize;
  /// Default maximum integration step size.
  static const T kMaxStepSize;
  /// General ODE system d𝐱/dt = f(t, 𝐱; 𝐤) function type.
  ///
  /// @param t The independent scalar variable t ∈ ℝ.
  /// @param x The dependent vector variable 𝐱 ∈ ℝⁿ.
  /// @param k The vector of parameters 𝐤 ∈ ℝᵐ.
  /// @return The derivative vector d𝐱/dt ∈ ℝⁿ.
  using OdeFunction = std::function<VectorX<T>(const T& t, const VectorX<T>& x,
                                               const VectorX<T>& k)>;
  /// Constructs an IVP described by the given @p ode_function, using @p x0 as
  /// initial conditions, and parameterized with @p k.
  ///
  /// @param ode_function The ODE function f(t, 𝐱; 𝐤) that describes the state
  ///                     evolution over time.
  /// @param x0 The initial state vector 𝐱₀ ∈ ℝⁿ.
  /// @param k The parameter vector 𝐤 ∈ ℝᵐ. By default m=0 (no parameters).
  InitialValueProblem(const OdeFunction& ode_function,
                      const Eigen::Ref<const VectorX<T>>& x0,
                      const Eigen::Ref<const VectorX<T>>& k = Vector0<T>{});
  /// Solves the IVP from the initial time @p t0 up to time @p tf, using the
  /// initial state vector 𝐱₀ and parameter vector 𝐤 provided in the
  /// constructor.
  ///
  /// @param t0 The initial time t₀ at which integration starts.
  /// @param tf The time up to which the IVP is solved; must be ≥ @p t0.
  /// @returns The solution 𝐱(@p tf; 𝐤).
  /// @throws std::exception if t0 > tf.
  VectorX<T> Solve(const T& t0, const T& tf) const;
  /// Solves and yields an approximation of the IVP solution x(t; 𝐤) for the
  /// closed time interval between the given initial time @p t0 and the given
  /// final time @p tf, using initial state 𝐱₀ and parameter vector 𝐤 provided
  /// in the constructor.
  ///
  /// To this end, the wrapped IntegratorBase instance solves this IVP,
  /// advancing time and state from t₀ and 𝐱₀ = 𝐱(t₀) to @p tf and 𝐱(@p tf),
  /// creating a dense output over that [t₀, @p tf] interval along the way.
  ///
  /// @param t0 The initial time t₀ at which integration starts.
  /// @param tf The IVP will be solved up to this time, which must be ≥ t₀.
  /// Usually, t₀ < @p tf as an empty dense output would result if t₀ = @p tf.
  /// @returns A dense approximation to 𝐱(t; 𝐤) with 𝐱(t₀; 𝐤) = 𝐱₀,
  /// defined for t₀ ≤ t ≤ tf.
  /// @note The larger the given @p tf value is, the larger the approximated
  /// interval will be. See documentation of the specific dense output
  /// technique in use for reference on performance impact as this
  /// interval grows.
  /// @throws std::exception if t0 > tf.
  std::unique_ptr<DenseOutput<T>> DenseSolve(const T& t0, const T& tf) const;
  /// Resets the internal integrator instance by in-place
  /// construction of the given integrator type.
  ///
  /// A usage example is shown below.
  /// @code{.cpp}
  /// ivp.reset_integrator<RungeKutta2Integrator<T>>(max_step);
  /// @endcode
  ///
  /// @param args The integrator type-specific arguments.
  /// @returns The new integrator instance.
  /// @tparam Integrator The integrator type, which must be an
  ///                    IntegratorBase subclass.
  /// @tparam Args The integrator specific argument types.
  /// @warning This operation invalidates pointers returned by
  ///          InitialValueProblem::get_integrator() and
  ///          InitialValueProblem::get_mutable_integrator().
  template <typename Integrator, typename... Args>
  Integrator* reset_integrator(Args&&... args) {
    integrator_ =
        std::make_unique<Integrator>(*system_, std::forward<Args>(args)...);
    // Re-bind the existing context so the new integrator continues from the
    // same time, state, and parameters as its predecessor.
    integrator_->reset_context(context_.get());
    return static_cast<Integrator*>(integrator_.get());
  }
  /// Gets a reference to the internal integrator instance.
  const IntegratorBase<T>& get_integrator() const {
    DRAKE_DEMAND(integrator_ != nullptr);
    return *integrator_.get();
  }
  /// Gets a mutable reference to the internal integrator instance.
  IntegratorBase<T>& get_mutable_integrator() {
    DRAKE_DEMAND(integrator_ != nullptr);
    return *integrator_.get();
  }
 private:
  // Resets the context / integrator between multiple solves.
  void ResetState() const;
  // IVP ODE solver integration context.
  std::unique_ptr<Context<T>> context_;
  // IVP system representation used for ODE solving.
  std::unique_ptr<System<T>> system_;
  // Numerical integrator used for IVP ODE solving.
  std::unique_ptr<IntegratorBase<T>> integrator_;
};
} // namespace systems
} // namespace drake
DRAKE_DECLARE_CLASS_TEMPLATE_INSTANTIATIONS_ON_DEFAULT_NONSYMBOLIC_SCALARS(
class drake::systems::InitialValueProblem)
| 0 |
/home/johnshepherd/drake/systems | /home/johnshepherd/drake/systems/analysis/runge_kutta2_integrator.h | #pragma once
#include <memory>
#include "drake/common/default_scalars.h"
#include "drake/common/drake_copyable.h"
#include "drake/systems/analysis/integrator_base.h"
namespace drake {
namespace systems {
/**
* A second-order, explicit Runge Kutta integrator.
* @tparam_default_scalar
* @ingroup integrators
*/
template <class T>
class RungeKutta2Integrator final : public IntegratorBase<T> {
 public:
  DRAKE_NO_COPY_NO_MOVE_NO_ASSIGN(RungeKutta2Integrator)
  ~RungeKutta2Integrator() override = default;
  /**
   * Constructs fixed-step integrator for a given system using the given
   * context for initial conditions.
   * @param system A reference to the system to be simulated
   * @param max_step_size The maximum (fixed) step size; the integrator will
   *                      not take larger step sizes than this.
   * @param context pointer to the context (nullptr is ok, but the caller
   *                must set a non-null context before Initialize()-ing the
   *                integrator).
   * @sa Initialize()
   */
  RungeKutta2Integrator(const System<T>& system, const T& max_step_size,
                        Context<T>* context = nullptr) :
      IntegratorBase<T>(system, context) {
    IntegratorBase<T>::set_maximum_step_size(max_step_size);
    // Pre-allocate scratch space once, so DoStep() can save a copy of the
    // start-of-step derivative without a per-step heap allocation.
    derivs0_ = IntegratorBase<T>::get_system().AllocateTimeDerivatives();
  }
  /**
   * The RK2 integrator does not support error estimation.
   */
  bool supports_error_estimation() const override { return false; }
  /// Integrator does not provide an error estimate.
  int get_error_estimate_order() const override { return 0; }
 private:
  // Takes one fixed RK2 step of size h; always reports success.
  bool DoStep(const T& h) override;
  // A pre-allocated temporary for use by integration.
  std::unique_ptr<ContinuousState<T>> derivs0_;
};
/**
* Integrates the system forward in time from the current time t₀ to
* t₁ = t₀ + h. The value of h is determined by IntegratorBase::Step().
*
* The Butcher tableau for this integrator follows: <pre>
*
* 0 |
* 1 | 1
* -----------------
* 1/2 1/2
* </pre>
*/
template <class T>
bool RungeKutta2Integrator<T>::DoStep(const T& h) {
  Context<T>* const context = IntegratorBase<T>::get_mutable_context();
  // CAUTION: This is performance-sensitive inner loop code that uses dangerous
  // long-lived references into state and cache to avoid unnecessary copying and
  // cache invalidation. Be careful not to insert calls to methods that could
  // invalidate any of these references before they are used.
  // TODO(sherm1) Consider moving this notation description to IntegratorBase
  // when it is more widely adopted.
  // Notation: we're using numeric subscripts for times t₀ and t₁, and
  // lower-case letter superscripts like t⁽ᵃ⁾ and t⁽ᵇ⁾ to indicate values
  // for intermediate stages of which there is only one here, stage a.
  // State x₀ = {xc₀, xd₀, xa₀}. We modify only t and xc here, but
  // derivative calculations depend on everything in the context, including t,
  // x and inputs u (which may depend on t and x).
  // Define x⁽ᵃ⁾ ≜ {xc⁽ᵃ⁾, xd₀, xa₀} and u⁽ᵃ⁾ ≜ u(t⁽ᵃ⁾, x⁽ᵃ⁾).
  // Evaluate derivative xcdot₀ ← xcdot(t₀, x(t₀), u(t₀)). Copy the result
  // into a temporary (derivs0_) since we'll be calculating another derivative
  // below and the cached derivative value will be overwritten.
  derivs0_->get_mutable_vector().SetFrom(
      this->EvalTimeDerivatives(*context).get_vector());
  const VectorBase<T>& xcdot0 = derivs0_->get_vector();
  // Cache: xcdot0 references a *copy* of the derivative result so is immune
  // to subsequent evaluations.
  // First intermediate stage is an explicit Euler step. This call marks t-
  // and xc-dependent cache entries out of date, including the derivative
  // cache entry.
  VectorBase<T>& xc = context->SetTimeAndGetMutableContinuousStateVector(
      context->get_time() + h);  // t⁽ᵃ⁾ ← t₁ = t₀ + h
  xc.PlusEqScaled(h, xcdot0);    // xc⁽ᵃ⁾ ← xc₀ + h * xcdot₀
  // Evaluate derivative xcdot⁽ᵃ⁾ ← xcdot(t⁽ᵃ⁾, x⁽ᵃ⁾, u⁽ᵃ⁾).
  const VectorBase<T>& xcdot_a =
      this->EvalTimeDerivatives(*context).get_vector();
  // Cache: xcdot_a references the live derivative cache value, currently
  // up to date but about to be marked out of date. We do not want to make
  // an unnecessary copy of this data.
  // Cache: Because we captured a reference to xc above and now want to modify
  // it in place and recalculate, we must manually tell the caching system that
  // we've made that change since it is otherwise unobservable. There is an
  // advanced method available for this purpose.
  // Marks xc-dependent cache entries out of date, including xcdot_a; time
  // doesn't change here.
  context->NoteContinuousStateChange();
  // Cache: xcdot_a still references the derivative cache value, which is
  // unchanged, although it is marked out of date. xcdot0 is unaffected.
  // Trapezoidal combination of the two slopes, written as a correction to the
  // Euler step already stored in xc:
  // xc₁ = xc₀ + h * (xcdot₀ + xcdot⁽ᵃ⁾)/2
  //     = xc⁽ᵃ⁾ + h * (xcdot⁽ᵃ⁾ - xcdot₀)/2
  xc.PlusEqScaled({{h / 2, xcdot_a}, {-h / 2, xcdot0}});
  // RK2 always succeeds at taking the step.
  return true;
}
} // namespace systems
} // namespace drake
DRAKE_DECLARE_CLASS_TEMPLATE_INSTANTIATIONS_ON_DEFAULT_SCALARS(
class drake::systems::RungeKutta2Integrator)
| 0 |
/home/johnshepherd/drake/systems | /home/johnshepherd/drake/systems/analysis/simulator_python_internal.h | #pragma once
namespace drake {
namespace systems {
namespace internal {
/* Offers a pydrake-specific private interface to the simulator.
The implementation of this header lives in `simulator.cc`.
@tparam_nonsymbolic_scalar */
template <typename T>
class SimulatorPythonInternal {
 public:
  SimulatorPythonInternal() = delete;
  /* Sets a python-specific `monitor` function callback for AdvanceTo(). The
  `monitor` is a plain function pointer (not a std::function) for performance.
  Setting to nullptr removes the monitor. */
  static void set_python_monitor(Simulator<T>* simulator, void (*monitor)());
};
} // namespace internal
} // namespace systems
} // namespace drake
| 0 |
/home/johnshepherd/drake/systems | /home/johnshepherd/drake/systems/analysis/simulator_status.h | #pragma once
#include <string>
#include <utility>
#include "drake/common/drake_assert.h"
#include "drake/common/drake_copyable.h"
#include "drake/common/nice_type_name.h"
#include "drake/systems/framework/system_base.h"
namespace drake {
namespace systems {
/** Holds the status return value from a call to Simulator::AdvanceTo() and
related methods. The argument t to AdvanceTo(t) is called the boundary time,
and represents the maximum time to which the simulation trajectory will be
advanced by a call to AdvanceTo(). (For methods that don't advance time, the
current time is considered to be the boundary time.) A normal, successful return
means that simulated time advanced successfully to the boundary time, without
encountering a termination condition or error condition. AdvanceTo() may return
earlier than the boundary time if one of those conditions is encountered.
In that case the return object holds a reference to the subsystem that detected
the condition and a human-friendly message from that subsystem that hopefully
explains what happened. */
class SimulatorStatus {
 public:
  DRAKE_DEFAULT_COPY_AND_MOVE_AND_ASSIGN(SimulatorStatus)

  enum ReturnReason {
    /** This is the normal return: no termination or error condition was
    encountered before reaching the boundary time. There is no message and no
    saved System. */
    kReachedBoundaryTime,
    /** An event handler or monitor function returned with a "reached
    termination condition" EventStatus (has message with details). For
    AdvanceTo() the return time may be earlier than the boundary time. */
    kReachedTerminationCondition,
    /** An event handler or monitor function returned with a "failed"
    EventStatus (has message with details). For AdvanceTo() the return time may
    be earlier than the boundary time. */
    kEventHandlerFailed
  };

  /** Sets this status to "reached boundary time" with no message and with
  the final time set to the boundary time (this is the same as the
  post-construction default). */
  void SetReachedBoundaryTime() {
    reason_ = kReachedBoundaryTime;
    return_time_ = boundary_time_;
    system_ = nullptr;
    message_.clear();
  }

  /** Sets this status to "reached termination" with the early-termination
  time and a message explaining why. */
  void SetReachedTermination(double termination_time, const SystemBase* system,
                             std::string message) {
    SetResult(termination_time, kReachedTerminationCondition, system,
              std::move(message));
  }

  /** Sets this status to "event handler failed" with the early-termination
  time and a message explaining why. */
  void SetEventHandlerFailed(double failure_time, const SystemBase* system,
                             std::string message) {
    SetResult(failure_time, kEventHandlerFailed, system, std::move(message));
  }

  /** Returns a human-readable message explaining the return result. */
  std::string FormatMessage() const;

  /** Returns true if we reached the boundary time with no surprises. */
  bool succeeded() const { return reason() == kReachedBoundaryTime; }

  /** Returns the maximum time we could have reached with this call; whether
  we actually got there depends on the status. This is the time supplied in
  an AdvanceTo() call or the current time for methods that don't advance
  time, that is, Initialize() and AdvancePendingEvents(). */
  double boundary_time() const { return boundary_time_; }

  /** Returns the time that was actually reached. This will be boundary_time()
  if succeeded() returns true. Otherwise it is the time at which a termination
  or error condition was detected and may be earlier than boundary_time(). */
  double return_time() const { return return_time_; }

  /** Returns the reason that a %Simulator call returned. */
  ReturnReason reason() const { return reason_; }

  /** Optionally, returns the subsystem to which the status and contained
  message should be attributed. May be nullptr in which case the status
  should be attributed to the System as a whole. */
  const SystemBase* system() const { return system_; }

  /** For termination or error conditions, returns a human-readable message
  explaining what happened. This is the message from the subsystem that
  detected the condition. FormatMessage() returns additional information and
  also includes this message. */
  const std::string& message() const { return message_; }

  /** Returns true if the `other` status contains exactly the same information
  as `this` status. This is likely only useful for unit testing of
  %SimulatorStatus. */
  // This is a pure observer, so it is declared const (previously it was
  // non-const, which prevented calling it through a const reference).
  bool IsIdenticalStatus(const SimulatorStatus& other) const {
    return boundary_time() == other.boundary_time() &&
           return_time() == other.return_time() && reason() == other.reason() &&
           system() == other.system() && message() == other.message();
  }

#ifndef DRAKE_DOXYGEN_CXX
  /* (Internal) For use by Simulator methods, creates a SimulatorStatus that
  assumes we will reach the specified `boundary_time` which should be the
  argument t that was supplied for AdvanceTo(t), or the current time for
  Initialize() and AdvancePendingEvents(). */
  explicit SimulatorStatus(double boundary_time)
      : boundary_time_(boundary_time) {
    SetReachedBoundaryTime();
  }
#endif

 private:
  // Common implementation for the early-return setters: records the return
  // time (which must not exceed the boundary time), the reason, the
  // originating subsystem (may be nullptr), and the explanatory message.
  void SetResult(double return_time, ReturnReason reason,
                 const SystemBase* system, std::string message) {
    DRAKE_DEMAND(return_time <= boundary_time_);
    return_time_ = return_time;
    reason_ = reason;
    system_ = system;
    message_ = std::move(message);
  }

  double boundary_time_{};
  double return_time_{};  // Initially set to boundary_time.
  ReturnReason reason_{kReachedBoundaryTime};
  const SystemBase* system_{nullptr};
  std::string message_;
};
} // namespace systems
} // namespace drake
| 0 |
/home/johnshepherd/drake/systems | /home/johnshepherd/drake/systems/analysis/simulator_config.cc | #include "drake/systems/analysis/simulator_config.h"
| 0 |
/home/johnshepherd/drake/systems | /home/johnshepherd/drake/systems/analysis/implicit_integrator.h | #pragma once
#include <algorithm>
#include <limits>
#include <memory>
#include <utility>
#include <Eigen/LU>
#include "drake/common/autodiff.h"
#include "drake/common/default_scalars.h"
#include "drake/common/drake_copyable.h"
#include "drake/systems/analysis/integrator_base.h"
namespace drake {
namespace systems {
/**
* An abstract class providing methods shared by implicit integrators.
* @tparam_nonsymbolic_scalar
*/
template <class T>
class ImplicitIntegrator : public IntegratorBase<T> {
public:
virtual ~ImplicitIntegrator() {}
explicit ImplicitIntegrator(const System<T>& system,
Context<T>* context = nullptr)
: IntegratorBase<T>(system, context) {}
/** The maximum number of Newton-Raphson iterations to take before the
Newton-Raphson process decides that convergence will not be attained. This
number affects the speed with which a solution is found. If the number is
too small, Jacobian/iteration matrix reformations and refactorizations will
be performed unnecessarily. If the number is too large, the Newton-Raphson
process will waste time evaluating derivatives when convergence is
infeasible. [Hairer, 1996] states, "It is our experience that the code
becomes more efficient when we allow a relatively high number of iterations
(e.g., [7 or 10])", p. 121. Note that the focus of that quote is a 5th order
integrator that uses a quasi-Newton approach.
*/
  int max_newton_raphson_iterations() const {
    // Dispatches to the NVI hook so subclasses can tune the limit.
    return do_max_newton_raphson_iterations();
  }
  /// Selects how the Jacobian matrix ∂f/∂𝐱 is computed; see the Jacobian
  /// scheme accessors below for guidance on choosing among these.
  enum class JacobianComputationScheme {
    /// Forward differencing.
    kForwardDifference,
    /// Central differencing.
    kCentralDifference,
    /// Automatic differentiation.
    kAutomatic
  };
/// @name Methods for getting and setting the Jacobian scheme.
///
/// Methods for getting and setting the scheme used to determine the
/// Jacobian matrix necessary for solving the requisite nonlinear system
  /// of equations.
///
/// Selecting the wrong Jacobian determination scheme will slow (possibly
/// critically) the implicit integration process. Automatic differentiation is
/// recommended if the System supports it for reasons of both higher
/// accuracy and increased speed. Forward differencing (i.e., numerical
/// differentiation) exhibits error in the approximation close to √ε, where
/// ε is machine epsilon, from n forward dynamics calls (where n is the number
/// of state variables). Central differencing yields the most accurate
/// numerically differentiated Jacobian matrix, but expends double the
/// computational effort for approximately three digits greater accuracy: the
/// total error in the central-difference approximation is close to ε^(2/3),
/// from 2n forward dynamics calls. See [Nocedal 2004, pp. 167-169].
///
/// - [Nocedal 2004] J. Nocedal and S. Wright. Numerical Optimization.
/// Springer, 2004.
/// @{
/// Sets whether the integrator attempts to reuse Jacobian matrices and
/// iteration matrix factorizations (default is `true`). Forming Jacobian
/// matrices and factorizing iteration matrices are generally the two most
/// expensive operations performed by this integrator. For small systems
/// (those with on the order of ten state variables), the additional accuracy
/// that using fresh Jacobians and factorizations buys- which can permit
/// increased step sizes but should have no effect on solution accuracy- can
/// outweigh the small factorization cost.
/// @note The reuse setting will have no effect when
/// get_use_full_newton() `== true`.
/// @see get_reuse()
/// @see set_use_full_newton()
  void set_reuse(bool reuse) { reuse_ = reuse; }
  /// Gets whether the integrator attempts to reuse Jacobian matrices and
  /// iteration matrix factorizations.
  /// @see set_reuse()
  /// @note This method always returns `false` when full-Newton mode is on,
  /// because full-Newton mode recomputes these matrices every iteration.
  bool get_reuse() const { return !use_full_newton_ && reuse_; }
  /// Sets whether the method operates in "full Newton" mode, in which case
  /// Jacobian and iteration matrices are freshly computed on every
  /// Newton-Raphson iteration. When set to `true`, this mode overrides
  /// the reuse mode.
  /// @see set_reuse()
  void set_use_full_newton(bool flag) { use_full_newton_ = flag; }
  /// Gets whether this method is operating in "full Newton" mode.
  /// @see set_use_full_newton()
  bool get_use_full_newton() const { return use_full_newton_; }
/// Sets the Jacobian computation scheme. This function can be safely called
/// at any time (i.e., the integrator need not be re-initialized afterward).
/// @note Discards any already-computed Jacobian matrices if the scheme
/// changes.
void set_jacobian_computation_scheme(JacobianComputationScheme scheme) {
if (jacobian_scheme_ != scheme) {
J_.resize(0, 0);
// Reset the Jacobian and any matrices cached by child integrators.
DoResetCachedJacobianRelatedMatrices();
}
jacobian_scheme_ = scheme;
}
JacobianComputationScheme get_jacobian_computation_scheme() const {
return jacobian_scheme_;
}
/// @}
/// @name Cumulative statistics functions.
/// The functions return statistics specific to the implicit integration
/// process.
/// @{
/// Gets the number of ODE function evaluations
/// (calls to EvalTimeDerivatives()) *used only for computing
/// the Jacobian matrices* since the last call to ResetStatistics(). This
/// count includes those derivative calculations necessary for computing
/// Jacobian matrices during error estimation processes.
int64_t get_num_derivative_evaluations_for_jacobian() const {
return num_jacobian_function_evaluations_;
}
/// Gets the number of Jacobian computations (i.e., the number of times
/// that the Jacobian matrix was reformed) since the last call to
/// ResetStatistics(). This count includes those evaluations necessary
/// during error estimation processes.
int64_t get_num_jacobian_evaluations() const { return
num_jacobian_evaluations_;
}
/// Gets the number of iterations used in the Newton-Raphson nonlinear systems
/// of equation solving process since the last call to ResetStatistics(). This
/// count includes those Newton-Raphson iterations used during error
/// estimation processes.
int64_t get_num_newton_raphson_iterations() const {
return do_get_num_newton_raphson_iterations();
}
/// Gets the number of factorizations of the iteration matrix since the last
/// call to ResetStatistics(). This count includes those refactorizations
/// necessary during error estimation processes.
int64_t get_num_iteration_matrix_factorizations() const {
return num_iter_factorizations_;
}
/// Gets the number of ODE function evaluations
/// (calls to EvalTimeDerivatives()) *used only for the error estimation
/// process* since the last call to ResetStatistics(). This count
/// includes those needed to compute Jacobian matrices.
int64_t get_num_error_estimator_derivative_evaluations() const {
return do_get_num_error_estimator_derivative_evaluations();
}
/// @}
/// @name Error-estimation statistics functions.
/// The functions return statistics specific to the error estimation
/// process.
/// @{
/// Gets the number of ODE function evaluations (calls to
/// CalcTimeDerivatives()) *used only for computing the Jacobian matrices
/// needed by the error estimation process* since the last call to
/// ResetStatistics().
int64_t get_num_error_estimator_derivative_evaluations_for_jacobian() const {
return do_get_num_error_estimator_derivative_evaluations_for_jacobian();
}
/// Gets the number of iterations *used in the Newton-Raphson nonlinear
/// systems of equation solving process for the error estimation process*
/// since the last call to ResetStatistics().
int64_t get_num_error_estimator_newton_raphson_iterations() const { return
do_get_num_error_estimator_newton_raphson_iterations();
}
/// Gets the number of Jacobian matrix computations *used only during
/// the error estimation process* since the last call to ResetStatistics().
int64_t get_num_error_estimator_jacobian_evaluations() const {
return do_get_num_error_estimator_jacobian_evaluations();
}
/// Gets the number of factorizations of the iteration matrix *used only
/// during the error estimation process* since the last call to
/// ResetStatistics().
int64_t get_num_error_estimator_iteration_matrix_factorizations() const {
return do_get_num_error_estimator_iteration_matrix_factorizations();
}
/// @}
protected:
/// Derived classes can override this method to change the number of
/// Newton-Raphson iterations (10 by default) to take before the
/// Newton-Raphson process decides that convergence will not be attained.
virtual int do_max_newton_raphson_iterations() const { return 10; }
/// A class for storing the factorization of an iteration matrix and using it
/// to solve linear systems of equations. This class exists simply because
/// Eigen AutoDiff puts limitations on what kinds of factorizations can be
/// used; encapsulating the iteration matrix factorizations like this frees
/// the implementer of these kinds of details.
  class IterationMatrix {
   public:
    /// Factors a dense matrix (the iteration matrix) using LU factorization,
    /// which should be faster than the QR factorization used in the specialized
    /// template method for AutoDiffXd below.
    void SetAndFactorIterationMatrix(const MatrixX<T>& iteration_matrix);
    /// Solves a linear system Ax = b for x using the iteration matrix (A)
    /// factored using LU decomposition.
    /// @pre SetAndFactorIterationMatrix() has been called.
    /// @see Factor()
    VectorX<T> Solve(const VectorX<T>& b) const;
    /// Returns whether the iteration matrix has been set and factored.
    bool matrix_factored() const { return matrix_factored_; }
   private:
    // Set to true by SetAndFactorIterationMatrix(); guards use of Solve().
    bool matrix_factored_{false};
    // A simple LU factorization is all that is needed for ImplicitIntegrator
    // templated on scalar type `double`; robustness in the solve
    // comes naturally as h << 1. Keeping this data in the class definition
    // serves to minimize heap allocations and deallocations.
    Eigen::PartialPivLU<MatrixX<double>> LU_;
    // The only factorization supported by automatic differentiation in Eigen is
    // currently QR. When ImplicitIntegrator is templated on type AutoDiffXd,
    // this will be the factorization that is used.
    Eigen::HouseholderQR<MatrixX<AutoDiffXd>> QR_;
  };
/// Computes necessary matrices (Jacobian and iteration matrix) for
/// Newton-Raphson (NR) iterations, as necessary. This method has been
/// designed for use in DoImplicitIntegratorStep() processes that follow this
/// model:
/// 1. DoImplicitIntegratorStep(h) is called;
/// 2. One or more NR iterations is performed until either (a) convergence is
/// identified, (b) the iteration is found to diverge, or (c) too many
/// iterations were taken. In the case of (a), DoImplicitIntegratorStep(h)
/// will return success. Otherwise, the Newton-Raphson process is attempted
/// again with (i) a recomputed and refactored iteration matrix and (ii) a
  /// recomputed Jacobian and a recomputed and refactored iteration matrix, in
/// that order. The process stage of that NR algorithm is indicated by the
/// `trial` parameter below. In this model, DoImplicitIntegratorStep()
/// returns failure if the NR iterations reach a fourth trial.
///
/// Note that the sophisticated logic above only applies when the Jacobian
/// reuse is activated (default, see get_reuse()).
///
/// @param t the time at which to compute the Jacobian.
/// @param xt the continuous state at which the Jacobian is computed.
/// @param h the integration step size (for computing iteration matrices).
/// @param trial which trial (1-4) the Newton-Raphson process is in when
/// calling this method.
/// @param compute_and_factor_iteration_matrix a function pointer for
/// computing and factoring the iteration matrix.
/// @param[out] iteration_matrix the updated and factored iteration matrix on
/// return.
/// @returns `false` if the calling stepping method should indicate failure;
/// `true` otherwise.
/// @pre 1 <= `trial` <= 4.
/// @post the state in the internal context may or may not be altered on
/// return; if altered, it will be set to (t, xt).
bool MaybeFreshenMatrices(const T& t, const VectorX<T>& xt, const T& h,
int trial,
const std::function<void(const MatrixX<T>& J, const T& h,
typename ImplicitIntegrator<T>::IterationMatrix*)>&
compute_and_factor_iteration_matrix,
typename ImplicitIntegrator<T>::IterationMatrix* iteration_matrix);
/// Computes necessary matrices (Jacobian and iteration matrix) for full
/// Newton-Raphson (NR) iterations, if full Newton-Raphson method is activated
/// (if it's not activated, this method is a no-op).
/// @param t the time at which to compute the Jacobian.
/// @param xt the continuous state at which the Jacobian is computed.
/// @param h the integration step size (for computing iteration matrices).
/// @param compute_and_factor_iteration_matrix a function pointer for
/// computing and factoring the iteration matrix.
/// @param[out] iteration_matrix the updated and factored iteration matrix on
/// return.
/// @post the state in the internal context will be set to (t, xt) and this
/// will store the updated Jacobian matrix, on return.
void FreshenMatricesIfFullNewton(const T& t, const VectorX<T>& xt, const T& h,
const std::function<void(const MatrixX<T>& J, const T& h,
typename ImplicitIntegrator<T>::IterationMatrix*)>&
compute_and_factor_iteration_matrix,
typename ImplicitIntegrator<T>::IterationMatrix* iteration_matrix);
/// Checks whether a proposed update is effectively zero, indicating that the
/// Newton-Raphson process converged.
/// @param xc the continuous state.
/// @param dxc the update to the continuous state.
/// @param eps the tolerance that will be used to determine whether the
/// change in any dimension of the state is nonzero. `eps` will
/// be treated as an absolute tolerance when the magnitude of a
/// particular dimension of the state is no greater than unity and as
/// a relative tolerance otherwise. For non-positive `eps` (default),
/// an appropriate tolerance will be computed.
/// @return `true` if the update is effectively zero.
bool IsUpdateZero(
const VectorX<T>& xc, const VectorX<T>& dxc, double eps = -1.0) const {
using std::abs;
using std::max;
// Reset the tolerance, if necessary, by backing off slightly from the
// tightest tolerance (machine epsilon).
if (eps <= 0)
eps = 10 * std::numeric_limits<double>::epsilon();
for (int i = 0; i < xc.size(); ++i) {
// We do not want the presence of a NaN to cause this function to
// spuriously return `true`, so indicate the update is not zero when a NaN
// is detected. This will make the Newton-Raphson process in the caller
// continue iterating until its inevitable failure.
using std::isnan;
if (isnan(dxc[i]) || isnan(xc[i])) return false;
const T tol = max(abs(xc[i]), T(1)) * eps;
if (abs(dxc[i]) > tol)
return false;
}
return true;
}
enum class ConvergenceStatus {
kDiverged,
kConverged,
kNotConverged,
};
/// Checks a Newton-Raphson iteration process for convergence. The logic
/// is based on the description on p. 121 from
/// [Hairer, 1996] E. Hairer and G. Wanner. Solving Ordinary Differential
/// Equations II (Stiff and Differential-Algebraic Problems).
/// Springer, 1996.
/// This function is called after the dx is computed in an iteration, to
/// determine if the Newton process converged, diverged, or needs further
/// iterations.
/// @param iteration the iteration index, starting at 0 for the first
/// iteration.
/// @param xtplus the state x at the current iteration.
/// @param dx the state change dx the difference between xtplus at the
/// current and the previous iteration.
/// @param dx_norm the weighted norm of dx
/// @param last_dx_norm the weighted norm of dx from the previous iteration.
/// This parameter is ignored during the first iteration.
/// @return `kConverged` for convergence, `kDiverged` for divergence,
/// otherwise `kNotConverged` if Newton-Raphson should simply
/// continue.
ConvergenceStatus CheckNewtonConvergence(int iteration,
const VectorX<T>& xtplus, const VectorX<T>& dx, const T& dx_norm,
const T& last_dx_norm) const;
/// Resets any statistics particular to a specific implicit integrator. The
/// default implementation of this function does nothing. If your integrator
/// collects its own statistics, you should re-implement this method and
/// reset them there.
virtual void DoResetImplicitIntegratorStatistics() {}
/// @copydoc IntegratorBase::DoReset()
virtual void DoImplicitIntegratorReset() {}
/// Resets any cached Jacobian or iteration matrices owned by child classes.
/// This is called when the user changes the Jacobian computation scheme;
/// the child class should use this to reset its cached matrices.
virtual void DoResetCachedJacobianRelatedMatrices() {}
/// Checks to see whether a Jacobian matrix is "bad" (has any NaN or
/// Inf values) and needs to be recomputed. A divergent Newton-Raphson
/// iteration can cause the state to overflow, which is how the Jacobian can
/// become "bad". This is an O(n²) operation, where n is the state dimension.
bool IsBadJacobian(const MatrixX<T>& J) const;
  // TODO(edrumwri) Document the functions below.
  // NVI hooks through which derived classes report integrator-specific
  // statistics counters; presumably these back same-named public accessors
  // declared elsewhere in this class — confirm against the full header.
  virtual int64_t do_get_num_newton_raphson_iterations() const = 0;
  virtual int64_t do_get_num_error_estimator_derivative_evaluations() const = 0;
  virtual int64_t
      do_get_num_error_estimator_derivative_evaluations_for_jacobian()
      const = 0;
  virtual int64_t do_get_num_error_estimator_newton_raphson_iterations()
      const = 0;
  virtual int64_t do_get_num_error_estimator_jacobian_evaluations() const = 0;
  virtual int64_t do_get_num_error_estimator_iteration_matrix_factorizations()
      const = 0;

  // Returns a mutable reference to the cached Jacobian matrix J_, for use by
  // derived classes.
  MatrixX<T>& get_mutable_jacobian() { return J_; }

  // Resets this class's combined statistics in addition to the base class's
  // (definition not shown in this header — confirm in the .cc).
  void DoResetStatistics() override;

  void DoReset() final;
// Compute the partial derivative of the ordinary differential equations with
// respect to the state variables for a given x(t).
// @param t the time around which to compute the Jacobian matrix.
// @param x the continuous state around which to compute the Jacobian matrix.
// @post the context's time and continuous state will be temporarily set
// during this call (and then reset to their original values) on return.
// Furthermore, the jacobian_is_fresh_ flag is set to "true", indicating
// that the Jacobian was computed from the most recent time t.
const MatrixX<T>& CalcJacobian(const T& t, const VectorX<T>& x);
// Computes the Jacobian of the ordinary differential equations around time
// and continuous state `(t, xt)` using a first-order forward difference
// (i.e., numerical differentiation).
// @param system The dynamical system.
// @param t the time around which to compute the Jacobian matrix.
// @param xt the continuous state around which to compute the Jacobian matrix.
// @param context the Context of the system, at time and continuous state
// unknown.
// @param[out] J the Jacobian matrix around time and state `(t, xt)`.
// @post The continuous state will be indeterminate on return.
void ComputeForwardDiffJacobian(const System<T>& system, const T& t,
const VectorX<T>& xt, Context<T>* context, MatrixX<T>* J);
// Computes the Jacobian of the ordinary differential equations around time
// and continuous state `(t, xt)` using a second-order central difference
// (i.e., numerical differentiation).
// @param system The dynamical system.
// @param t the time around which to compute the Jacobian matrix.
// @param xt the continuous state around which to compute the Jacobian matrix.
// @param context the Context of the system, at time and continuous state
// unknown.
// @param[out] J the Jacobian matrix around time and state `(t, xt)`.
// @post The continuous state will be indeterminate on return.
void ComputeCentralDiffJacobian(const System<T>& system, const T& t,
const VectorX<T>& xt, Context<T>* context, MatrixX<T>* J);
// Computes the Jacobian of the ordinary differential equations around time
// and continuous state `(t, xt)` using automatic differentiation.
// @param system The dynamical system.
// @param t the time around which to compute the Jacobian matrix.
// @param xt the continuous state around which to compute the Jacobian matrix.
// @param context the Context of the system, at time and continuous state
// unknown.
// @param[out] J the Jacobian matrix around time and state `(t, xt)`.
// @post The continuous state will be indeterminate on return.
void ComputeAutoDiffJacobian(const System<T>& system, const T& t,
const VectorX<T>& xt, const Context<T>& context, MatrixX<T>* J);
/// @copydoc IntegratorBase::DoStep()
virtual bool DoImplicitIntegratorStep(const T& h) = 0;
// Methods for derived classes to increment the factorization and Jacobian
// evaluation counts.
  // Increments the count of iteration-matrix factorizations performed.
  void increment_num_iter_factorizations() {
    ++num_iter_factorizations_;
  }

  // Adds `count` ODE right-hand-side evaluations attributable to Jacobian
  // computation to the running total.
  void increment_jacobian_computation_derivative_evaluations(int count) {
    num_jacobian_function_evaluations_ += count;
  }

  // Increments the count of Jacobian matrix evaluations.
  void increment_jacobian_evaluations() {
    ++num_jacobian_evaluations_;
  }

  // Sets whether the cached Jacobian was computed from the start of the
  // current step; see the jacobian_is_fresh_ member below for details.
  void set_jacobian_is_fresh(bool flag) {
    jacobian_is_fresh_ = flag;
  }
private:
  // Implements IntegratorBase::DoStep() by delegating to the derived class's
  // DoImplicitIntegratorStep() and maintaining the Jacobian freshness flag.
  bool DoStep(const T& h) final {
    bool result = DoImplicitIntegratorStep(h);
    // If the implicit step is successful (result is true), we need a new
    // Jacobian (fresh is false). Otherwise, a failed step (result is false)
    // means we can keep the Jacobian (fresh is true). Therefore fresh =
    // !result, almost always.
    // The exception is when the implicit step fails during the second half-
    // step of ImplicitEulerIntegrator, in which case the Jacobian is not from
    // the beginning of the step, and so fresh should be false. We leave it
    // untouched here to keep the design of ImplicitIntegrator<T> simple, and
    // let ImplicitEulerIntegrator<T> handle this flag on its own at the
    // beginning of ImplicitEulerIntegrator<T>::DoImplicitIntegratorStep().
    jacobian_is_fresh_ = !result;
    return result;
  }
// The scheme to be used for computing the Jacobian matrix during the
// nonlinear system solve process.
JacobianComputationScheme jacobian_scheme_{
JacobianComputationScheme::kForwardDifference};
// The last computed Jacobian matrix.
MatrixX<T> J_;
// Indicates whether the Jacobian matrix is fresh. We say the Jacobian is
// "fresh" if it was last computed at a state (t0, x0) from the beginning of
// the current step. This indicates to MaybeFreshenMatrices that it should
// not recompute the Jacobian, but rather it should fail immediately. This
// is only used when use_full_newton_ and reuse_ are set to false.
bool jacobian_is_fresh_{false};
// If set to `false`, Jacobian matrices and iteration matrix factorizations
// will not be reused.
bool reuse_{true};
// If set to `true`, Jacobian matrices and iteration matrix factorizations
// will be freshly computed on every Newton-Raphson iteration. This should
// only ever be useful in debugging.
bool use_full_newton_{false};
// Various combined statistics.
int64_t num_iter_factorizations_{0};
int64_t num_jacobian_evaluations_{0};
int64_t num_jacobian_function_evaluations_{0};
};
// We do not support computing the Jacobian matrix using automatic
// differentiation when the scalar is already an AutoDiff type.
// Note: must be declared inline because it's specialized and located in the
// header file (to avoid multiple definition errors).
// @throws std::runtime_error unconditionally when called.
template <>
inline void ImplicitIntegrator<AutoDiffXd>::
    ComputeAutoDiffJacobian(const System<AutoDiffXd>&,
                            const AutoDiffXd&, const VectorX<AutoDiffXd>&,
                            const Context<AutoDiffXd>&, MatrixX<AutoDiffXd>*) {
  throw std::runtime_error("AutoDiff'd Jacobian not supported from "
                           "AutoDiff'd ImplicitIntegrator");
}
// Factors a dense matrix (the iteration matrix). This
// AutoDiff-specialized method is necessary because Eigen's LU factorization,
// which should be faster than the QR factorization used here, is not currently
// AutoDiff-able (while the QR factorization *is* AutoDiff-able).
// Note: must be declared inline because it's specialized and located in the
// header file (to avoid multiple definition errors).
template <>
inline void ImplicitIntegrator<AutoDiffXd>::IterationMatrix::
    SetAndFactorIterationMatrix(const MatrixX<AutoDiffXd>& iteration_matrix) {
  QR_.compute(iteration_matrix);
  // Record that a factorization is now available for Solve().
  matrix_factored_ = true;
}
// Solves the linear system Ax = b for x using the iteration matrix (A)
// factored using QR decomposition.
// @see Factor()
// Note: must be declared inline because it's specialized and located in the
// header file (to avoid multiple definition errors).
// Assumes SetAndFactorIterationMatrix() was called first so that QR_ holds a
// valid factorization.
template <>
inline VectorX<AutoDiffXd>
ImplicitIntegrator<AutoDiffXd>::IterationMatrix::Solve(
    const VectorX<AutoDiffXd>& b) const {
  return QR_.solve(b);
}
} // namespace systems
} // namespace drake
DRAKE_DECLARE_CLASS_TEMPLATE_INSTANTIATIONS_ON_DEFAULT_NONSYMBOLIC_SCALARS(
class drake::systems::ImplicitIntegrator)
| 0 |
/home/johnshepherd/drake/systems | /home/johnshepherd/drake/systems/analysis/semi_explicit_euler_integrator.h | #pragma once
#include <utility>
#include "drake/common/default_scalars.h"
#include "drake/common/drake_copyable.h"
#include "drake/systems/analysis/integrator_base.h"
namespace drake {
namespace systems {
/**
* A first-order, semi-explicit Euler integrator. State is updated in the
* following manner: <pre>
* v(t₀+h) = v(t₀) + dv/dt(t₀) * h
* dq/dt = N(q(t₀)) * v(t₀+h)
* q(t₀+h) = q(t₀) + dq/dt * h
* </pre>
* where `v` are the generalized velocity variables and `q` are generalized
* coordinates. `h` is the integration step size, and `N` is a matrix
* (dependent upon `q(t₀)`) that maps velocities to time derivatives of
* generalized coordinates. For rigid body systems in 2D, for example, `N`
* will generally be an identity matrix. For a single rigid body in 3D, `N` and
* its pseudo-inverse (`N` is generally non-square but always left invertible)
* are frequently used to transform between time derivatives of Euler
* parameters (unit quaternions) and angular velocities (and vice versa),
* [Nikravesh 1988].
*
* Note that these equations imply that the velocity variables are updated
* first and that these new velocities are then used to update the generalized
* coordinates (compare to ExplicitEulerIntegrator, where the generalized
* coordinates are updated using the previous velocity variables).
*
* When a mechanical system is Hamiltonian (informally meaning that the
* system is not subject to velocity-dependent forces), the semi-explicit
* Euler integrator is a symplectic (energy conserving) integrator.
* Symplectic integrators advertise energetically consistent behavior with large
* step sizes compared to non-symplectic integrators. Multi-body systems
* are not Hamiltonian, even in the absence of externally applied
* velocity-dependent forces, due to the presence of both Coriolis and
* gyroscopic forces. This integrator thus does not generally conserve energy
* for such systems.
*
* <h4>Association between time stepping and the semi-explicit Euler
* integrator:</h4>
* Though many time stepping approaches use the formulations above, these
* equations do not represent a "time stepping scheme". The semi-explicit
* Euler integration equations can be applied from one point in state space to
* another, assuming smoothness in between, just like any other integrator using
* the following process:
* (1) a simulator integrates to discontinuities, (2) the state of the ODE/DAE
* is re-initialized, and (3) integration continues.
*
* In contrast, time stepping schemes enforce all constraints at a single
* time in the integration process: though a billiard break may consist of tens
* of collisions occurring sequentially over a millisecond of time, a time
* stepping method will treat all of these collisions as occurring
* simultaneously.
*
* - [Nikravesh 1988] P. Nikravesh. Computer-Aided Analysis of Mechanical
* Systems. Prentice Hall. New Jersey, 1988.
* - [Stewart 2000] D. Stewart. Rigid-body Dynamics with Friction and
* Impact. SIAM Review, 42:1, 2000.
*
* @tparam_nonsymbolic_scalar
* @ingroup integrators
*/
template <class T>
class SemiExplicitEulerIntegrator final : public IntegratorBase<T> {
 public:
  DRAKE_NO_COPY_NO_MOVE_NO_ASSIGN(SemiExplicitEulerIntegrator)

  virtual ~SemiExplicitEulerIntegrator() {}

  // TODO(edrumwri): update documentation to account for stretching (after
  // stretching has become a user settable).
  /**
   * Constructs a fixed-step integrator for a given system using the given
   * context for initial conditions.
   * @param system A reference to the system to be simulated.
   * @param max_step_size The maximum (fixed) step size; the integrator will
   *                      not take larger step sizes than this.
   * @param context Pointer to the context (nullptr is ok, but the caller
   *                must set a non-null context before Initialize()-ing the
   *                integrator).
   * @sa Initialize()
   */
  // NOTE(review): the qdot_ member initializer below dereferences `context`
  // unconditionally, so the documented nullptr default cannot actually be
  // used without undefined behavior — confirm intent, and either fix this
  // documentation or defer sizing qdot_ until initialization time.
  SemiExplicitEulerIntegrator(const System<T>& system, const T& max_step_size,
                              Context<T>* context = nullptr)
      : IntegratorBase<T>(system, context),
        qdot_(context->get_continuous_state().num_q()) {
    IntegratorBase<T>::set_maximum_step_size(max_step_size);
  }

  /**
   * Gets the error estimate order (returns zero, since error estimation is
   * not provided).
   */
  int get_error_estimate_order() const override { return 0; }

  /**
   * Integrator does not support accuracy estimation.
   */
  bool supports_error_estimation() const override { return false; }

 private:
  bool DoStep(const T& h) override;

  // This is a pre-allocated temporary for use by integration; DoStep() fills
  // it with the generalized coordinate derivative dq/dt = N(q) * v.
  BasicVector<T> qdot_;
};
/**
 * Integrates the system forward in time by h. This value is determined
 * by IntegratorBase::StepOnce().
 * @returns true always; this fixed-step integrator cannot fail.
 */
template <class T>
bool SemiExplicitEulerIntegrator<T>::DoStep(const T& h) {
  const System<T>& system = this->get_system();
  Context<T>& context = *this->get_mutable_context();

  // CAUTION: This is performance-sensitive inner loop code that uses dangerous
  // long-lived references into state and cache to avoid unnecessary copying and
  // cache invalidation. Be careful not to insert calls to methods that could
  // invalidate any of these references before they are used.

  // Evaluate derivative xcdot(t₀) ← xcdot(t₀, x(t₀), u(t₀)).
  const ContinuousState<T>& xc_deriv = this->EvalTimeDerivatives(context);
  // Retrieve the accelerations and auxiliary variable derivatives.
  const VectorBase<T>& vdot = xc_deriv.get_generalized_velocity();
  const VectorBase<T>& zdot = xc_deriv.get_misc_continuous_state();

  // Cache: vdot and zdot reference the live derivative cache value, currently
  // up to date but about to be marked out of date. We do not want to make
  // an unnecessary copy of this data.

  // This invalidates computations that are dependent on v or z.
  // Marks v- and z-dependent cache entries out of date, including vdot and
  // zdot; time doesn't change here.
  std::pair<VectorBase<T>*, VectorBase<T>*> vz = context.GetMutableVZVectors();
  VectorBase<T>& v = *vz.first;
  VectorBase<T>& z = *vz.second;

  // Cache: vdot and zdot still reference the derivative cache value, which is
  // unchanged, although it is marked out of date.

  // Update the velocity and auxiliary state variables:
  //   v(t₀+h) ← v(t₀) + h * vdot, z(t₀+h) ← z(t₀) + h * zdot.
  v.PlusEqScaled(h, vdot);
  z.PlusEqScaled(h, zdot);

  // Convert the updated generalized velocity to the time derivative of
  // generalized coordinates (dq/dt = N(q(t₀)) * v(t₀+h)). Note that this
  // mapping is q-dependent and hasn't been invalidated if it was
  // pre-computed.
  system.MapVelocityToQDot(context, v, &qdot_);

  // Now set time and q to their final values. This marks time- and
  // q-dependent cache entries out of date. That includes the derivative
  // cache entry though we don't need it again here.
  VectorBase<T>& q =
      context.SetTimeAndGetMutableQVector(context.get_time() + h);
  q.PlusEqScaled(h, qdot_);  // q(t₀+h) ← q(t₀) + h * dq/dt.

  // This integrator always succeeds at taking the step.
  return true;
}
} // namespace systems
} // namespace drake
DRAKE_DECLARE_CLASS_TEMPLATE_INSTANTIATIONS_ON_DEFAULT_NONSYMBOLIC_SCALARS(
class drake::systems::SemiExplicitEulerIntegrator)
| 0 |
/home/johnshepherd/drake/systems | /home/johnshepherd/drake/systems/analysis/simulator_config.h | #pragma once
#include <string>
#include "drake/common/name_value.h"
namespace drake {
namespace systems {
// TODO(dale.mcconachie) Update to include all configurable properties of
// IntegratorBase. Currently, initial_step_size_target, minimum_step_size, and
// throw_on_minimum_step_size_violation are missing.
/// The set of all configurable properties on a Simulator and IntegratorBase.
struct SimulatorConfig {
  /// Passes this object to an Archive.
  /// Refer to @ref yaml_serialization "YAML Serialization" for background.
  template <typename Archive>
  void Serialize(Archive* a) {
    a->Visit(DRAKE_NVP(integration_scheme));
    a->Visit(DRAKE_NVP(max_step_size));
    a->Visit(DRAKE_NVP(accuracy));
    a->Visit(DRAKE_NVP(use_error_control));
    a->Visit(DRAKE_NVP(target_realtime_rate));
    a->Visit(DRAKE_NVP(publish_every_time_step));
  }

  /// Name of the integration scheme to use, e.g. "runge_kutta3".
  std::string integration_scheme{"runge_kutta3"};
  /// Maximum integration step size, in seconds.
  double max_step_size{0.1};
  /// Target accuracy for error-controlled integration; ignored when the
  /// integrator runs in fixed-step mode.
  double accuracy{1.0e-4};
  /// Whether the integrator should use error control when it supports it.
  bool use_error_control{true};
  /// Desired simulation rate relative to real time; see
  /// Simulator::set_target_realtime_rate().
  double target_realtime_rate{0.0};
  /// Sets Simulator::set_publish_at_initialization() in addition to
  /// Simulator::set_publish_every_time_step() when applied by
  /// ApplySimulatorConfig().
  bool publish_every_time_step{false};
};
} // namespace systems
} // namespace drake
| 0 |
/home/johnshepherd/drake/systems | /home/johnshepherd/drake/systems/analysis/lyapunov.cc | #include "drake/systems/analysis/lyapunov.h"
#include <string>
#include "drake/common/symbolic/expression.h"
#include "drake/common/text_logging.h"
#include "drake/math/autodiff.h"
#include "drake/math/autodiff_gradient.h"
#include "drake/solvers/mathematical_program.h"
#include "drake/solvers/solve.h"
namespace drake {
namespace systems {
namespace analysis {
using Eigen::VectorXd;
using symbolic::Expression;
namespace {
// Helper because AddLinearConstraint throws if I pass in something trivially
// true.
void AddLinearConstraintIfNonTrivial(const symbolic::Formula &f,
                                     solvers::MathematicalProgram *prog) {
  // A trivially-true formula contributes nothing; skip it.
  if (symbolic::is_true(f)) return;
  prog->AddLinearConstraint(f);
}
} // namespace
Eigen::VectorXd SampleBasedLyapunovAnalysis(
    const System<double>& system, const Context<double>& context,
    const std::function<VectorX<AutoDiffXd>(const VectorX<AutoDiffXd>& state)>&
        basis_functions,
    const Eigen::Ref<const Eigen::MatrixXd>& state_samples,
    const Eigen::Ref<const Eigen::VectorXd>& V_zero_state) {
  // Each column of state_samples is one sample state xᵢ.
  const int state_size = state_samples.rows();
  const int num_samples = state_samples.cols();
  DRAKE_DEMAND(state_size > 0);
  DRAKE_DEMAND(num_samples > 0);
  DRAKE_DEMAND(V_zero_state.rows() == state_size);

  // TODO(russt): handle discrete state.
  DRAKE_DEMAND(context.has_only_continuous_state());
  DRAKE_DEMAND(context.num_continuous_states() == state_size);

  // TODO(russt): check that the system is time-invariant.

  solvers::MathematicalProgram prog;

  // The decision variables are the basis coefficients pᵢ in
  // V(x) = ∑ pᵢ φᵢ(x); φ(x₀) is evaluated once here for the V(x₀) = 0
  // boundary condition below.
  const VectorXd phi0 =
      math::ExtractValue(basis_functions(V_zero_state));
  const int num_parameters = phi0.size();
  DRAKE_DEMAND(num_parameters > 0);
  const solvers::VectorXDecisionVariable params =
      prog.NewContinuousVariables(num_parameters, "a");

  // Add an objective that drives Vdot ~= -1.
  // Note(russt): Tried the L2 norm version of this (which doesn't require
  // the slack variables), but it was significantly slower.
  // Note(russt): Also had a version of this that accepted an optional
  // MatrixXd of V(x₁) = 1 as a different way to set the boundary conditions.
  // But having this objective is much more generally useful, I think.
  //
  // Add slack variables s >= |Vdot + 1|.
  const solvers::VectorXDecisionVariable slack = prog.NewContinuousVariables
      (num_samples, "s");
  // Minimize ∑ sᵢ
  prog.AddLinearCost(VectorXd::Ones(num_samples), 0, slack);

  drake::log()->info("Building mathematical program.");

  // V(x₀) = 0.
  AddLinearConstraintIfNonTrivial(params.dot(phi0) == 0, &prog);

  // Scratch storage reused across samples; the cloned context lets us
  // evaluate f(xᵢ) without mutating the caller's context.
  Eigen::VectorXd state(state_size);
  VectorX<AutoDiffXd> autodiff_state(state_size);
  auto my_context = context.Clone();
  auto& context_state = my_context->get_mutable_continuous_state_vector();
  auto derivatives = system.AllocateTimeDerivatives();

  for (int si = 0; si < num_samples; si++) {
    state = state_samples.col(si);
    // Seed AutoDiff so that φ(xᵢ) carries gradients ∂φ/∂x.
    math::InitializeAutoDiff(state, &autodiff_state);
    const VectorX<AutoDiffXd> phi = basis_functions(autodiff_state);
    const Expression V = params.dot(math::ExtractValue(phi));

    // Evaluate f(xᵢ) at this sample.
    context_state.SetFromVector(state);
    system.CalcTimeDerivatives(*my_context, derivatives.get());
    // Chain rule: V̇(xᵢ) = pᵀ (∂φ/∂x · f(xᵢ)).
    const Eigen::VectorXd phidot =
        math::ExtractGradient(phi) * derivatives->CopyToVector();
    const Expression Vdot = params.dot(phidot);

    // ∀xᵢ, V(xᵢ) ≥ 0
    AddLinearConstraintIfNonTrivial(V >= 0., &prog);
    // ∀xᵢ, V̇(xᵢ) = ∂V/∂x f(xᵢ) ≤ 0.
    AddLinearConstraintIfNonTrivial(Vdot <= 0., &prog);
    // ∀i, sᵢ ≥ |V̇(xᵢ) + 1|.
    prog.AddLinearConstraint(slack(si) >= Vdot + 1);
    prog.AddLinearConstraint(slack(si) >= -(Vdot + 1));
  }

  drake::log()->info("Solving program.");
  const solvers::MathematicalProgramResult result = Solve(prog);
  if (!result.is_success()) {
    drake::log()->error("No solution found. SolutionResult = " +
                        to_string(result.get_solution_result()));
  }
  drake::log()->info("Done solving program.");

  return result.GetSolution(params);
}
} // namespace analysis
} // namespace systems
} // namespace drake
| 0 |
/home/johnshepherd/drake/systems | /home/johnshepherd/drake/systems/analysis/dense_output.cc | #include "drake/systems/analysis/dense_output.h"
DRAKE_DEFINE_CLASS_TEMPLATE_INSTANTIATIONS_ON_DEFAULT_SCALARS(
class drake::systems::DenseOutput)
| 0 |
/home/johnshepherd/drake/systems | /home/johnshepherd/drake/systems/analysis/explicit_euler_integrator.h | #pragma once
#include <memory>
#include "drake/common/default_scalars.h"
#include "drake/common/drake_copyable.h"
#include "drake/systems/analysis/integrator_base.h"
namespace drake {
namespace systems {
/**
* A first-order, explicit Euler integrator. State is updated in the following
* manner:
* <pre>
* x(t+h) = x(t) + dx/dt * h
* </pre>
*
* @tparam_default_scalar
* @ingroup integrators
*/
template <class T>
class ExplicitEulerIntegrator final : public IntegratorBase<T> {
 public:
  DRAKE_NO_COPY_NO_MOVE_NO_ASSIGN(ExplicitEulerIntegrator)

  ~ExplicitEulerIntegrator() override = default;

  /**
   * Constructs a fixed-step integrator for a given system using the given
   * context for initial conditions.
   * @param system A reference to the system to be simulated
   * @param max_step_size The maximum (fixed) step size; the integrator will
   *                      not take larger step sizes than this.
   * @param context Pointer to the context (nullptr is ok, but the caller
   *                must set a non-null context before Initialize()-ing the
   *                integrator).
   * @sa Initialize()
   */
  ExplicitEulerIntegrator(const System<T>& system, const T& max_step_size,
                          Context<T>* context = nullptr)
      : IntegratorBase<T>(system, context) {
    IntegratorBase<T>::set_maximum_step_size(max_step_size);
  }

  /**
   * Explicit Euler integrator does not support error estimation.
   */
  bool supports_error_estimation() const override { return false; }

  /**
   * Integrator does not provide an error estimate (returns zero).
   */
  int get_error_estimate_order() const override { return 0; }

 private:
  // Takes a single fixed-size explicit Euler step; see definition below.
  bool DoStep(const T& h) override;
};
/**
 * Integrates the system forward in time by h, starting at the current time t₀.
 * This value of h is determined by IntegratorBase::Step().
 * @returns true always; this fixed-step integrator cannot fail.
 */
template <class T>
bool ExplicitEulerIntegrator<T>::DoStep(const T& h) {
  Context<T>& context = *this->get_mutable_context();

  // CAUTION: This is performance-sensitive inner loop code that uses dangerous
  // long-lived references into state and cache to avoid unnecessary copying and
  // cache invalidation. Be careful not to insert calls to methods that could
  // invalidate any of these references before they are used.

  // Evaluate derivative xcdot₀ ← xcdot(t₀, x(t₀), u(t₀)).
  const ContinuousState<T>& xc_deriv = this->EvalTimeDerivatives(context);
  const VectorBase<T>& xcdot0 = xc_deriv.get_vector();

  // Cache: xcdot0 references the live derivative cache value, currently
  // up to date but about to be marked out of date. We do not want to make
  // an unnecessary copy of this data.

  // Update continuous state and time. This call marks t- and xc-dependent
  // cache entries out of date, including xcdot0.
  VectorBase<T>& xc = context.SetTimeAndGetMutableContinuousStateVector(
      context.get_time() + h);  // t ← t₀ + h

  // Cache: xcdot0 still references the derivative cache value, which is
  // unchanged, although it is marked out of date.

  xc.PlusEqScaled(h, xcdot0);  // xc(t₀ + h) ← xc(t₀) + h * xcdot₀

  // This integrator always succeeds at taking the step.
  return true;
}
} // namespace systems
} // namespace drake
DRAKE_DECLARE_CLASS_TEMPLATE_INSTANTIATIONS_ON_DEFAULT_SCALARS(
class drake::systems::ExplicitEulerIntegrator)
| 0 |
/home/johnshepherd/drake/systems | /home/johnshepherd/drake/systems/analysis/lyapunov.h | #pragma once
#include <functional>
#include "drake/common/autodiff.h"
#include "drake/common/eigen_types.h"
#include "drake/systems/framework/context.h"
#include "drake/systems/framework/system.h"
namespace drake {
namespace systems {
namespace analysis {
/// Sets up a linear program to search for the coefficients of a
/// Lyapunov function that satisfies the Lyapunov conditions at a set
/// of sample points.
/// ∀xᵢ, V(xᵢ) ≥ 0,
/// ∀xᵢ, V̇(xᵢ) = ∂V/∂x f(xᵢ) ≤ 0.
/// In order to provide boundary conditions to the problem, and improve
/// numerical conditioning, we additionally impose the constraint
/// V(x₀) = 0,
/// and add an objective that pushes V̇(xᵢ) towards -1 (time-to-go):
/// min ∑ |V̇(xᵢ) + 1|.
///
/// For background, and a description of this algorithm, see
/// http://underactuated.csail.mit.edu/underactuated.html?chapter=lyapunov .
/// It currently requires that the system to be optimized has only continuous
/// state and it is assumed to be time invariant.
///
/// @param system to be verified. We currently require that the system has
/// only continuous state, and it is assumed to be time invariant. Unlike
/// many analysis algorithms, the system does *not* need to support conversion
/// to other ScalarTypes (double is sufficient).
///
/// @param context is used only to specify any parameters of the system, and to
/// fix any input ports. The system/context must have all inputs assigned.
///
/// @param basis_functions must define an AutoDiffXd function that takes the
/// state vector as an input argument and returns the vector of values of the
/// basis functions at that state. The Lyapunov function will then have the
/// form
/// V(x) = ∑ pᵢ φᵢ(x),
/// where `p` is the vector to be solved for and `φ(x)` is the vector of
/// basis function evaluations returned by this function.
///
/// @param state_samples is a list of sample states (one per column) at which
/// to apply the optimization constraints and the objective.
///
/// @param V_zero_state is a particular state, x₀, where we impose the
/// condition: V(x₀) = 0.
///
/// @return params the VectorXd of parameters, p, that satisfies the Lyapunov
/// conditions described above. The resulting Lyapunov function is
/// V(x) = ∑ pᵢ φᵢ(x),
///
/// @ingroup analysis
Eigen::VectorXd SampleBasedLyapunovAnalysis(
const System<double>& system, const Context<double>& context,
const std::function<VectorX<AutoDiffXd>(const VectorX<AutoDiffXd>& state)>&
basis_functions,
const Eigen::Ref<const Eigen::MatrixXd>& state_samples,
const Eigen::Ref<const Eigen::VectorXd>& V_zero_state);
} // namespace analysis
} // namespace systems
} // namespace drake
| 0 |
/home/johnshepherd/drake/systems | /home/johnshepherd/drake/systems/analysis/scalar_dense_output.h | #pragma once
#include "drake/common/default_scalars.h"
#include "drake/common/drake_copyable.h"
#include "drake/systems/analysis/dense_output.h"
namespace drake {
namespace systems {
/// A DenseOutput class interface extension to deal with scalar ODE
/// solutions. A ScalarDenseOutput instance is also a DenseOutput
/// instance with single element vector values (i.e. size() == 1).
/// As such, its value can evaluated in both scalar and vectorial
/// form (via EvaluateScalar() and Evaluate(), respectively).
///
/// @tparam_default_scalar
template <typename T>
class ScalarDenseOutput : public DenseOutput<T> {
 public:
  DRAKE_NO_COPY_NO_MOVE_NO_ASSIGN(ScalarDenseOutput)

  virtual ~ScalarDenseOutput() = default;

  /// Evaluates this output at time @p t, returning a scalar.
  /// @param t Time at which to evaluate output.
  /// @returns Output scalar value.
  /// @pre Output is not empty i.e. is_empty() is false.
  /// @throws std::exception if any of the preconditions is not met.
  /// @throws std::exception if given @p t is not within output's domain
  ///                        i.e. @p t ∉ [start_time(), end_time()].
  T EvaluateScalar(const T& t) const {
    // Validate preconditions in the same order as the vector Evaluate() path.
    this->ThrowIfOutputIsEmpty(__func__);
    this->ThrowIfTimeIsInvalid(__func__, t);
    return this->DoEvaluateScalar(t);
  }

 protected:
  ScalarDenseOutput() = default;

  // Implements the vector-valued evaluation by wrapping the scalar value in
  // a one-element vector.
  VectorX<T> DoEvaluate(const T& t) const override {
    VectorX<T> value(1);
    value(0) = this->DoEvaluateScalar(t);
    return value;
  }

  // A scalar output always has exactly one element.
  int do_size() const override { return 1; }

  // @see EvaluateScalar(const T&)
  virtual T DoEvaluateScalar(const T& t) const = 0;
};
} // namespace systems
} // namespace drake
DRAKE_DECLARE_CLASS_TEMPLATE_INSTANTIATIONS_ON_DEFAULT_SCALARS(
class drake::systems::ScalarDenseOutput)
| 0 |
/home/johnshepherd/drake/systems | /home/johnshepherd/drake/systems/analysis/simulator_gflags.cc | #include "drake/systems/analysis/simulator_gflags.h"
#include <stdexcept>
#include <utility>
#include "drake/common/default_scalars.h"
#include "drake/common/drake_throw.h"
#include "drake/common/text_logging.h"
#include "drake/systems/analysis/simulator.h"
#include "drake/systems/analysis/simulator_config_functions.h"
// === Simulator's parameters ===
DEFINE_double(simulator_target_realtime_rate,
drake::systems::SimulatorConfig{}.target_realtime_rate,
"[Simulator flag] Desired rate relative to real time. See "
"documentation for Simulator::set_target_realtime_rate() for "
"details.");
DEFINE_bool(simulator_publish_every_time_step,
drake::systems::SimulatorConfig{}.publish_every_time_step,
"[Simulator flag] Sets whether the simulation should trigger a "
"forced-Publish event at the end of every trajectory-advancing "
"step. This also includes the very first publish at t = 0 (see "
"Simulator::set_publish_at_initialization())."
"See Simulator::set_publish_every_time_step() for details.");
// === Integrator's parameters ===
// N.B. The list here must be kept in sync with GetSupportedIntegrators() in
// simulator_config_functions.cc.
DEFINE_string(simulator_integration_scheme,
drake::systems::SimulatorConfig{}.integration_scheme,
"[Integrator flag] Integration scheme to be used. Available "
"options are: "
"'bogacki_shampine3', "
"'explicit_euler', "
"'implicit_euler', "
"'radau1', "
"'radau3', "
"'runge_kutta2', "
"'runge_kutta3', "
"'runge_kutta5', "
"'semi_explicit_euler', "
"'velocity_implicit_euler'");
DEFINE_double(simulator_max_time_step,
drake::systems::SimulatorConfig{}.max_step_size,
"[Integrator flag] Maximum simulation time step used for "
"integration. [s].");
DEFINE_double(simulator_accuracy, drake::systems::SimulatorConfig{}.accuracy,
"[Integrator flag] Sets the simulation accuracy for variable "
"step size integrators with error control.");
DEFINE_bool(simulator_use_error_control,
drake::systems::SimulatorConfig{}.use_error_control,
"[Integrator flag] If 'true', the simulator's integrator will use "
"error control if it supports it. Otherwise, the simulator "
"attempts to use fixed steps.");
namespace drake {
namespace systems {
namespace internal {
template <typename T>
IntegratorBase<T>& ResetIntegratorFromGflags(Simulator<T>* simulator) {
  DRAKE_THROW_UNLESS(simulator != nullptr);

  // Swap in the integrator named by the command-line flags.
  IntegratorBase<T>& integrator = ResetIntegratorFromFlags(
      simulator, FLAGS_simulator_integration_scheme,
      T(FLAGS_simulator_max_time_step));

  // Honor the error-control flag only for schemes that can estimate error.
  if (integrator.supports_error_estimation()) {
    integrator.set_fixed_step_mode(!FLAGS_simulator_use_error_control);
  }
  if (integrator.get_fixed_step_mode()) {
    // Fixed-step mode ignores accuracy, so warn the user if the accuracy
    // flag was changed from its default on the command line.
    if (FLAGS_simulator_accuracy !=
        drake::systems::SimulatorConfig{}.accuracy) {
      log()->warn(
          "Integrator accuracy provided, however the integrator is running in "
          "fixed step mode. The 'simulator_accuracy' flag will be ignored. "
          "Switch to an error controlled scheme if you want accuracy control.");
    }
  } else {
    integrator.set_target_accuracy(FLAGS_simulator_accuracy);
  }
  return integrator;
}
// Builds a Simulator for `system` (seeded with `context`, which may be null)
// and applies every simulator/integrator setting from the gflags declared in
// this file via ApplySimulatorConfig().
// @returns the newly created, fully configured Simulator.
template <typename T>
std::unique_ptr<Simulator<T>> MakeSimulatorFromGflags(
    const System<T>& system, std::unique_ptr<Context<T>> context) {
  auto simulator = std::make_unique<Simulator<T>>(system, std::move(context));
  // Gather the flag values into a config struct, field by field.
  SimulatorConfig config;
  config.integration_scheme = FLAGS_simulator_integration_scheme;
  config.max_step_size = FLAGS_simulator_max_time_step;
  config.accuracy = FLAGS_simulator_accuracy;
  config.use_error_control = FLAGS_simulator_use_error_control;
  config.target_realtime_rate = FLAGS_simulator_target_realtime_rate;
  config.publish_every_time_step = FLAGS_simulator_publish_every_time_step;
  ApplySimulatorConfig(config, simulator.get());
  return simulator;
}
// Explicitly instantiates the two function templates above for Drake's
// default non-symbolic scalar types.
DRAKE_DEFINE_FUNCTION_TEMPLATE_INSTANTIATIONS_ON_DEFAULT_NONSYMBOLIC_SCALARS((
    &ResetIntegratorFromGflags<T>,
    &MakeSimulatorFromGflags<T>
))
}  // namespace internal
}  // namespace systems
}  // namespace drake
| 0 |
/home/johnshepherd/drake/systems | /home/johnshepherd/drake/systems/analysis/bogacki_shampine3_integrator.h | #pragma once
#include <memory>
#include "drake/common/default_scalars.h"
#include "drake/common/drake_copyable.h"
#include "drake/systems/analysis/integrator_base.h"
namespace drake {
namespace systems {
/**
A third-order, four-stage, first-same-as-last (FSAL) Runge-Kutta integrator
with a second order error estimate.
For a discussion of this Runge-Kutta method, see [Bogacki, 1989].
The Butcher tableau for this integrator follows:
<pre>
0 |
1/2 | 1/2
3/4 | 0 3/4
1 | 2/9 1/3 4/9
-----------------------------------------------------------------------------
2/9 1/3 4/9 0
7/24 1/4 1/3 1/8
</pre>
where the second to last row is the 3rd-order (propagated) solution and
the last row gives a 2nd-order accurate solution used for error control.
- [Bogacki, 1989] P. Bogacki and L. Shampine. "A 3(2) pair of Runge–Kutta
formulas", Appl. Math. Letters, 2 (4): 321–325, 1989.
@tparam_nonsymbolic_scalar
@ingroup integrators
*/
template <class T>
class BogackiShampine3Integrator final : public IntegratorBase<T> {
 public:
  DRAKE_NO_COPY_NO_MOVE_NO_ASSIGN(BogackiShampine3Integrator)

  ~BogackiShampine3Integrator() override = default;

  // Constructs the integrator for `system`, optionally operating on the given
  // `context`; pre-allocates the derivative and error-estimate scratch
  // buffers so that DoStep() need not allocate.
  explicit BogackiShampine3Integrator(const System<T>& system,
      Context<T>* context = nullptr) : IntegratorBase<T>(system, context) {
    derivs1_ = system.AllocateTimeDerivatives();
    derivs2_ = system.AllocateTimeDerivatives();
    derivs3_ = system.AllocateTimeDerivatives();
    err_est_vec_ = std::make_unique<BasicVector<T>>(derivs1_->size());
    save_xc0_.resize(derivs1_->size());
  }

  /**
   * The integrator supports error estimation.
   */
  bool supports_error_estimation() const override { return true; }

  /// The order of the asymptotic term in the error estimate.
  int get_error_estimate_order() const override { return 3; }

 private:
  void DoInitialize() override;
  bool DoStep(const T& h) override;

  // Vector used in error estimate calculations.
  std::unique_ptr<BasicVector<T>> err_est_vec_;

  // Vector used to save initial value of xc.
  VectorX<T> save_xc0_;

  // These are pre-allocated temporaries for use by integration. They store
  // the derivatives computed at various points within the integration
  // interval.
  std::unique_ptr<ContinuousState<T>> derivs1_, derivs2_, derivs3_;
};
} // namespace systems
} // namespace drake
DRAKE_DECLARE_CLASS_TEMPLATE_INSTANTIATIONS_ON_DEFAULT_NONSYMBOLIC_SCALARS(
class ::drake::systems::BogackiShampine3Integrator)
| 0 |
/home/johnshepherd/drake/systems | /home/johnshepherd/drake/systems/analysis/instantaneous_realtime_rate_calculator.cc | #include "drake/systems/analysis/instantaneous_realtime_rate_calculator.h"
#include <utility>
namespace drake {
namespace systems {
namespace internal {
// Computes the instantaneous realtime rate as (sim-time elapsed) /
// (wall-time elapsed) since the previous call. Returns nullopt on the very
// first call, when no wall time has passed, or when sim time moved backwards.
std::optional<double>
InstantaneousRealtimeRateCalculator::UpdateAndRecalculate(
    double current_sim_time) {
  std::optional<double> result;
  if (prev_sim_time_.has_value()) {
    const double elapsed_wall = timer_->Tick();
    const double elapsed_sim = current_sim_time - *prev_sim_time_;
    // Guard against division by zero and against a negative rate.
    if (elapsed_wall > 0 && elapsed_sim >= 0) {
      result = elapsed_sim / elapsed_wall;
    }
  }
  // Record the new reference point and restart the wall timer for next time.
  prev_sim_time_ = current_sim_time;
  timer_->Start();
  return result;
}
// (Test support) Replaces the internal wall-clock timer, e.g. with a mock
// whose readings tests can control deterministically.
void InstantaneousRealtimeRateCalculator::InjectMockTimer(
    std::unique_ptr<Timer> mock_timer) {
  timer_ = std::move(mock_timer);
}
} // namespace internal
} // namespace systems
} // namespace drake
| 0 |
/home/johnshepherd/drake/systems | /home/johnshepherd/drake/systems/analysis/scalar_initial_value_problem.h | #pragma once
#include <memory>
#include <optional>
#include <utility>
#include "drake/common/default_scalars.h"
#include "drake/common/drake_copyable.h"
#include "drake/common/eigen_types.h"
#include "drake/systems/analysis/initial_value_problem.h"
#include "drake/systems/analysis/scalar_view_dense_output.h"
namespace drake {
namespace systems {
/// A thin wrapper of the InitialValueProblem class to provide a simple
/// interface when solving scalar initial value problems i.e. when evaluating
/// the x(t; 𝐤) solution function to the given ODE dx/dt = f(t, x; 𝐤),
/// where f : t ⨯ x → ℝ , t ∈ ℝ, x ∈ ℝ, 𝐤 ∈ ℝᵐ, along with an initial
/// condition x(t₀; 𝐤) = x₀. The parameter vector 𝐤 allows for generic IVP
/// definitions, which can later be solved for any instance of said vector.
///
/// Note the distinction from general initial value problems where
/// f : t ⨯ 𝐱 → ℝⁿ and 𝐱 ∈ ℝⁿ, addressed by the class being wrapped. While
/// every scalar initial value problem could be written in vector form, this
/// wrapper keeps both problem definition and solution in their scalar form
/// with almost zero overhead, leading to clearer code if applicable.
/// Moreover, this scalar form facilitates single-dimensional quadrature
/// using methods for solving initial value problems.
///
/// See InitialValueProblem class documentation for information on caching
/// support and dense output usage for improved efficiency in scalar IVP
/// solving.
///
/// For further insight into its use, consider the following examples of scalar
/// IVPs:
///
/// - The population growth of an hypothetical bacteria colony is described
/// by dN/dt = r * N. The colony has N₀ subjects at time t₀. In this
/// context, x ≜ N, x₀ ≜ N₀, 𝐤 ≜ [r], dx/dt = f(t, x; 𝐤) = 𝐤₁ * x.
///
/// - The charge Q stored in the capacitor of a (potentially equivalent) series
/// RC circuit driven by a time varying voltage source E(t) can be described
/// by dQ/dt = (E(t) - Q / Cs) / Rs, where Rs refers to the resistor's
/// resistance and Cs refers to the capacitor's capacitance. In this context,
/// and assuming an initial stored charge Q₀ at time t₀, x ≜ Q, 𝐤 ≜ [Rs, Cs],
/// x₀ ≜ Q₀, dx/dt = f(t, x; 𝐤) = (E(t) - x / 𝐤₂) / 𝐤₁.
///
/// @tparam_nonsymbolic_scalar
template <typename T>
class ScalarInitialValueProblem {
 public:
  DRAKE_NO_COPY_NO_MOVE_NO_ASSIGN(ScalarInitialValueProblem);

  /// Scalar ODE dx/dt = f(t, x; 𝐤) function type.
  ///
  /// @param t The independent variable t ∈ ℝ .
  /// @param x The dependent variable x ∈ ℝ .
  /// @param k The parameter vector 𝐤 ∈ ℝᵐ.
  /// @return The derivative dx/dt ∈ ℝ.
  using ScalarOdeFunction =
      std::function<T(const T& t, const T& x, const VectorX<T>& k)>;

  /// Constructs a scalar IVP described by the given @p scalar_ode_function,
  /// using given @p x0 as initial conditions, and parameterized with @p k.
  ///
  /// @param scalar_ode_function The ODE function f(t, 𝐱; 𝐤) that describes
  ///                            the state evolution over time.
  /// @param x0 The initial state 𝐱₀ ∈ ℝ.
  /// @param k The parameter vector 𝐤 ∈ ℝᵐ. By default m=0 (no parameters).
  ScalarInitialValueProblem(
      const ScalarOdeFunction& scalar_ode_function, const T& x0,
      const Eigen::Ref<const VectorX<T>>& k = Vector0<T>{});

  /// Solves the IVP from time @p t0 up to time @p tf, using the initial state
  /// 𝐱₀ and parameter vector 𝐤 provided in the constructor.
  /// @throws std::exception if t0 > tf.
  T Solve(const T& t0, const T& tf) const;

  /// Solves and yields an approximation of the IVP solution x(t; 𝐤) for the
  /// closed time interval between the initial time @p t0 and the final time @p
  /// tf, using initial state 𝐱₀ and parameter vector 𝐤 provided in the
  /// constructor.
  ///
  /// To this end, the wrapped IntegratorBase instance solves this IVP,
  /// advancing time and state from t₀ and 𝐱₀ = 𝐱(@p t0) to @p tf and 𝐱(@p
  /// tf), creating a dense output over that [@p t0, @p tf] interval along the
  /// way.
  ///
  /// @param tf The IVP will be solved up to this time, which must be ≥ @p t0.
  /// Usually, @p t0 < @p tf as an empty dense output would result if @p t0 =
  /// @p tf.
  /// @returns A dense approximation to 𝐱(t; 𝐤) with 𝐱(t0; 𝐤) = 𝐱₀,
  /// defined for t0 ≤ t ≤ tf.
  /// @note The larger the given @p tf value is, the larger the approximated
  ///       interval will be. See documentation of the specific dense output
  ///       technique in use for reference on performance impact as this
  ///       interval grows.
  /// @throws std::exception if t0 > tf.
  std::unique_ptr<ScalarDenseOutput<T>> DenseSolve(const T& t0,
                                                   const T& tf) const;

  /// Resets the internal integrator instance by in-place
  /// construction of the given integrator type.
  ///
  /// A usage example is shown below.
  /// @code{.cpp}
  ///    scalar_ivp.reset_integrator<RungeKutta2Integrator<T>>(max_step);
  /// @endcode
  ///
  /// @param args The integrator type-specific arguments.
  /// @returns The new integrator instance.
  /// @tparam Integrator The integrator type, which must be an
  ///                    IntegratorBase subclass.
  /// @tparam Args The integrator specific argument types.
  /// @warning This operation invalidates pointers returned by
  ///          ScalarInitialValueProblem::get_integrator() and
  ///          ScalarInitialValueProblem::get_mutable_integrator().
  template <typename Integrator, typename... Args>
  Integrator* reset_integrator(Args&&... args) {
    // Forwards to the wrapped vector-form IVP, which owns the integrator.
    return vector_ivp_->template reset_integrator<Integrator>(
        std::forward<Args>(args)...);
  }

  /// Gets a reference to the internal integrator instance.
  const IntegratorBase<T>& get_integrator() const {
    return vector_ivp_->get_integrator();
  }

  /// Gets a mutable reference to the internal integrator instance.
  IntegratorBase<T>& get_mutable_integrator() {
    return vector_ivp_->get_mutable_integrator();
  }

 private:
  // Vector IVP representation of this scalar IVP; all solving is delegated
  // to it.
  std::unique_ptr<InitialValueProblem<T>> vector_ivp_;
};
} // namespace systems
} // namespace drake
DRAKE_DECLARE_CLASS_TEMPLATE_INSTANTIATIONS_ON_DEFAULT_NONSYMBOLIC_SCALARS(
class drake::systems::ScalarInitialValueProblem)
| 0 |
/home/johnshepherd/drake/systems | /home/johnshepherd/drake/systems/analysis/scalar_view_dense_output.h | #pragma once
#include <memory>
#include <utility>
#include <fmt/format.h>
#include "drake/common/default_scalars.h"
#include "drake/common/drake_copyable.h"
#include "drake/systems/analysis/dense_output.h"
#include "drake/systems/analysis/scalar_dense_output.h"
namespace drake {
namespace systems {
/// A ScalarDenseOutput class implementation that wraps a
/// DenseOutput class instance and behaves as a view to one of
/// its elements.
///
/// @tparam_default_scalar
template <typename T>
class ScalarViewDenseOutput : public ScalarDenseOutput<T> {
 public:
  DRAKE_NO_COPY_NO_MOVE_NO_ASSIGN(ScalarViewDenseOutput)

  /// Constructs a view of another DenseOutput instance.
  /// @param base_output Base dense output to operate with.
  /// @param n The nth scalar element (0-indexed) of the output value
  ///          to view.
  /// @throws std::exception if @p base_output is nullptr.
  /// @throws std::exception if given @p n does not refer to a valid
  ///         base output dimension, i.e. @p n ∉ [0, `base_output`->size()).
  explicit ScalarViewDenseOutput(
      std::unique_ptr<DenseOutput<T>> base_output, int n)
      : base_output_(std::move(base_output)), n_(n) {
    // Validate the wrapped output first, then the requested element index.
    if (base_output_ == nullptr) {
      throw std::runtime_error("Base dense output to view is null.");
    }
    const int size = base_output_->size();
    if (!(0 <= n_ && n_ < size)) {
      throw std::runtime_error(fmt::format(
          "Index {} out of base dense output [0, {}) range.", n_, size));
    }
  }

  /// Returns the base dense output upon which the
  /// view operates.
  const DenseOutput<T>* get_base_output() const { return base_output_.get(); }

 protected:
  // All queries below simply delegate to the wrapped (vector) dense output,
  // projecting onto its nth element where a scalar value is required.
  T DoEvaluateScalar(const T& t) const override {
    return base_output_->EvaluateNth(t, n_);
  }

  bool do_is_empty() const override { return base_output_->is_empty(); }

  const T& do_start_time() const override { return base_output_->start_time(); }

  const T& do_end_time() const override { return base_output_->end_time(); }

  // The base (vector) dense output being wrapped.
  const std::unique_ptr<DenseOutput<T>> base_output_;
  // The nth scalar element (0-indexed) of the base
  // (vector) dense output value to view.
  const int n_;
};
} // namespace systems
} // namespace drake
DRAKE_DECLARE_CLASS_TEMPLATE_INSTANTIATIONS_ON_DEFAULT_SCALARS(
class drake::systems::ScalarViewDenseOutput)
| 0 |
/home/johnshepherd/drake/systems | /home/johnshepherd/drake/systems/analysis/scalar_dense_output.cc | #include "drake/systems/analysis/scalar_dense_output.h"
// Explicitly instantiates ScalarDenseOutput for Drake's default scalar types.
DRAKE_DEFINE_CLASS_TEMPLATE_INSTANTIATIONS_ON_DEFAULT_SCALARS(
    class drake::systems::ScalarDenseOutput)
| 0 |
/home/johnshepherd/drake/systems | /home/johnshepherd/drake/systems/analysis/simulator_gflags.h | #pragma once
// @file
// This file defines gflags settings to control Simulator settings.
// Only include this from translation units that declare a `main` function.
#include <memory>
#include <gflags/gflags.h>
#include "drake/systems/analysis/integrator_base.h"
#include "drake/systems/analysis/simulator.h"
// Declares integrator gflags.
DECLARE_string(simulator_integration_scheme);
DECLARE_double(simulator_max_time_step);
DECLARE_double(simulator_accuracy);
DECLARE_bool(simulator_use_error_control);
// Declares simulator gflags.
DECLARE_double(simulator_target_realtime_rate);
DECLARE_bool(simulator_publish_every_time_step);
namespace drake {
namespace systems {
namespace internal {
// Resets the integrator used to advanced the continuous time dynamics of the
// system associated with `simulator` according to the gflags declared in this
// file.
// @param[in,out] simulator
// On input, a valid pointer to a Simulator. On output the
// integrator for `simulator` is reset according to the gflags declared in
// this file.
// @tparam_nonsymbolic_scalar
// @returns A reference to the newly created integrator owned by `simulator`.
template <typename T>
IntegratorBase<T>& ResetIntegratorFromGflags(Simulator<T>* simulator);
// Makes a new simulator according to the gflags declared in this file.
// @param[in] system
// The System to be associated with the newly crated Simulator. You must
// ensure that `system` has a longer lifetime than the new Simulator.
// @param[in] context
// The Context that will be used as the initial condition for the simulation;
// otherwise the Simulator will obtain a default Context from `system`.
// @tparam_nonsymbolic_scalar
// @returns The newly created Simulator.
template <typename T>
std::unique_ptr<Simulator<T>> MakeSimulatorFromGflags(
const System<T>& system, std::unique_ptr<Context<T>> context = nullptr);
} // namespace internal
} // namespace systems
} // namespace drake
| 0 |
/home/johnshepherd/drake/systems | /home/johnshepherd/drake/systems/analysis/integrator_base.cc | #include "drake/systems/analysis/integrator_base.h"
namespace drake {
namespace systems {
// Takes a single error-controlled step of no more than h_max, shrinking and
// retrying (reverting time/state each time) until the local error estimate
// satisfies the accuracy in use. Returns true iff the step taken equals the
// full h_max request.
template <class T>
bool IntegratorBase<T>::StepOnceErrorControlledAtMost(const T& h_max) {
  using std::isnan;
  using std::min;

  // Verify that the integrator supports error estimates.
  if (!supports_error_estimation()) {
    throw std::logic_error("StepOnceErrorControlledAtMost() requires error "
                           "estimation.");
  }

  // Save time, continuous variables, and time derivative because we'll possibly
  // revert time and state.
  const Context<T>& context = get_context();
  const T current_time = context.get_time();
  VectorBase<T>& xc =
      get_mutable_context()->get_mutable_continuous_state_vector();
  xc0_save_ = xc.CopyToVector();

  // Set the step size to attempt.
  T step_size_to_attempt = get_ideal_next_step_size();
  if (isnan(step_size_to_attempt)) {
    // Integrator has not taken a step. Set the current step size to the
    // initial step size.
    step_size_to_attempt = get_initial_step_size_target();
    DRAKE_DEMAND(!isnan(step_size_to_attempt));
  }

  // This variable indicates when the integrator has been pushed to its minimum
  // step limit. It can only be "true" if minimum step exceptions have been
  // suppressed by the user via set_throw_on_minimum_step_size_violation(false),
  // and the error control mechanism determines that the step is as low as it
  // can go.
  bool at_minimum_step_size = false;

  bool step_succeeded = false;
  do {
    // Constants used to determine whether modifications to the step size are
    // close enough to the attempted step size to use the unadjusted originals,
    // or (1) whether the step size to be attempted is so small that we should
    // consider it to be artificially limited or (2) whether the step size to
    // be attempted is sufficiently close to that requested such that the step
    // size should be stretched slightly.
    const double near_enough_smaller = 0.95;
    const double near_enough_larger = 1.001;

    // If we lose more than a small fraction of the step size we wanted
    // to take due to a need to stop at h_max, make a note of that so the
    // step size adjuster won't try to grow from the current step.
    bool h_was_artificially_limited = false;
    if (h_max < near_enough_smaller * step_size_to_attempt) {
      // h_max much smaller than current step size.
      h_was_artificially_limited = true;
      step_size_to_attempt = h_max;
    } else {
      if (h_max < near_enough_larger * step_size_to_attempt) {
        // h_max is roughly current step. Make it the step size to prevent
        // creating a small sliver (the remaining step).
        step_size_to_attempt = h_max;
      }
    }

    // Limit the current step size.
    step_size_to_attempt = min(step_size_to_attempt, get_maximum_step_size());

    // Keep adjusting the integration step size until any integrator
    // convergence failures disappear. Note: this loop's correctness is
    // predicated on the assumption that an integrator will always converge for
    // a sufficiently small, yet nonzero step size.
    T adjusted_step_size = step_size_to_attempt;
    while (!Step(adjusted_step_size)) {
      DRAKE_LOGGER_DEBUG("Sub-step failed at {}", adjusted_step_size);
      adjusted_step_size *= subdivision_factor_;

      // Note: we could give the user more rope to hang themselves by looking
      // for zero rather than machine epsilon, which might be advantageous if
      // the user were modeling systems over extremely small time scales.
      // However, that issue could be addressed instead by scaling units, and
      // using machine epsilon allows failure to be detected much more rapidly.
      if (adjusted_step_size < std::numeric_limits<double>::epsilon()) {
        throw std::runtime_error("Integrator has been directed to a near zero-"
                                 "length step in order to obtain convergence.");
      }
      ValidateSmallerStepSize(step_size_to_attempt, adjusted_step_size);
      ++num_shrinkages_from_substep_failures_;
      ++num_substep_failures_;
      if (get_dense_output()) {
        // Take dense output one step back to undo
        // the last integration step.
        dense_output_->RemoveFinalSegment();
      }
    }
    step_size_to_attempt = adjusted_step_size;

    //--------------------------------------------------------------------
    T err_norm = CalcStateChangeNorm(*get_error_estimate());
    T next_step_size;
    std::tie(step_succeeded, next_step_size) = CalcAdjustedStepSize(
        err_norm, step_size_to_attempt, &at_minimum_step_size);
    DRAKE_LOGGER_DEBUG("Succeeded? {}, Next step size: {}",
                       step_succeeded, next_step_size);

    if (step_succeeded) {
      // Only update the next step size (retain the previous one) if the
      // step size was not artificially limited.
      if (!h_was_artificially_limited)
        ideal_next_step_size_ = next_step_size;

      if (isnan(get_actual_initial_step_size_taken()))
        set_actual_initial_step_size_taken(step_size_to_attempt);

      // Record the adapted step size taken.
      if (isnan(get_smallest_adapted_step_size_taken()) ||
          (step_size_to_attempt < get_smallest_adapted_step_size_taken() &&
           step_size_to_attempt < h_max))
        set_smallest_adapted_step_size_taken(step_size_to_attempt);
    } else {
      ++num_shrinkages_from_error_control_;

      // Set the next step size to attempt.
      step_size_to_attempt = next_step_size;

      // Reset the time, state, and time derivative at t0.
      get_mutable_context()->SetTime(current_time);
      xc.SetFromVector(xc0_save_);
      if (get_dense_output()) {
        // Take dense output one step back to undo
        // the last integration step.
        dense_output_->RemoveFinalSegment();
      }
    }
  } while (!step_succeeded);

  // A "full" step is one that was not shrunk below the h_max request.
  return static_cast<bool>(step_size_to_attempt == h_max);
}
// Computes the weighted infinity norm of a state change `dx_state`, used by
// error control to compare the local error estimate against the accuracy in
// use. The result is the max over the separately-weighted generalized
// position, velocity, and miscellaneous-state norms (NaN if any is NaN).
template <class T>
T IntegratorBase<T>::CalcStateChangeNorm(
    const ContinuousState<T>& dx_state) const {
  using std::max;
  const Context<T>& context = get_context();
  const System<T>& system = get_system();

  // Get weighting matrices.
  const auto& qbar_v_weight = this->get_generalized_state_weight_vector();
  const auto& z_weight = this->get_misc_state_weight_vector();

  // Get the differences in the generalized position, velocity, and
  // miscellaneous continuous state vectors.
  const VectorBase<T>& dgq = dx_state.get_generalized_position();
  const VectorBase<T>& dgv = dx_state.get_generalized_velocity();
  const VectorBase<T>& dgz = dx_state.get_misc_continuous_state();

  // (re-)Initialize pinvN_dq_change_ and weighted_q_change_, if necessary.
  // Reinitialization might be required if the system state variables can
  // change during the course of the simulation.
  if (pinvN_dq_change_ == nullptr) {
    pinvN_dq_change_ = std::make_unique<BasicVector<T>>(dgv.size());
    weighted_q_change_ = std::make_unique<BasicVector<T>>(dgq.size());
  }
  DRAKE_DEMAND(pinvN_dq_change_->size() == dgv.size());
  DRAKE_DEMAND(weighted_q_change_->size() == dgq.size());

  // TODO(edrumwri): Acquire characteristic time properly from the system
  //                 (i.e., modify the System to provide this value).
  const double characteristic_time = 1.0;

  // Computes the infinity norm of the weighted velocity variables.
  unweighted_substate_change_ = dgv.CopyToVector();
  T v_nrm = qbar_v_weight.cwiseProduct(unweighted_substate_change_).
      template lpNorm<Eigen::Infinity>() * characteristic_time;

  // Compute the infinity norm of the weighted auxiliary variables.
  unweighted_substate_change_ = dgz.CopyToVector();
  T z_nrm = (z_weight.cwiseProduct(unweighted_substate_change_))
                .template lpNorm<Eigen::Infinity>();

  // Compute N * Wq * dq = N * Wꝗ * N+ * dq.
  // (Position changes are mapped through velocity space so that the same
  // weighting can be applied there, then mapped back.)
  unweighted_substate_change_ = dgq.CopyToVector();
  system.MapQDotToVelocity(context, unweighted_substate_change_,
                           pinvN_dq_change_.get());
  system.MapVelocityToQDot(
      context, qbar_v_weight.cwiseProduct(pinvN_dq_change_->CopyToVector()),
      weighted_q_change_.get());
  T q_nrm = weighted_q_change_->CopyToVector().
      template lpNorm<Eigen::Infinity>();
  DRAKE_LOGGER_DEBUG("dq norm: {}, dv norm: {}, dz norm: {}",
                     q_nrm, v_nrm, z_nrm);

  // Return NaN if one of the values is NaN (whether std::max does this is
  // dependent upon ordering!)
  using std::isnan;
  if (isnan(q_nrm) || isnan(v_nrm) || isnan(z_nrm))
    return std::numeric_limits<T>::quiet_NaN();

  // TODO(edrumwri): Record the worst offender (which of the norms resulted
  // in the largest value).
  // Infinity norm of the concatenation of multiple vectors is equal to the
  // maximum of the infinity norms of the individual vectors.
  return max(z_nrm, max(q_nrm, v_nrm));
}
// Given the error norm `err` produced by a step of size `step_taken`,
// computes whether that step is acceptable and what step size to use next.
// Returns (success, next step size); may set *at_minimum_step_size when the
// integrator has been pushed to its working minimum step.
template <class T>
std::pair<bool, T> IntegratorBase<T>::CalcAdjustedStepSize(
    const T& err,
    const T& step_taken,
    bool* at_minimum_step_size) const {
  using std::pow;
  using std::min;
  using std::max;
  using std::isnan;
  using std::isinf;

  // Magic numbers come from Simbody.
  const double kSafety = 0.9;
  const double kMinShrink = 0.1;
  const double kMaxGrow = 5.0;
  const double kHysteresisLow = 0.9;
  const double kHysteresisHigh = 1.2;

  // Get the order for the integrator's error estimate.
  const int err_order = get_error_estimate_order();

  // Set value for new step size to invalid value initially.
  T new_step_size(-1);

  // First, make a guess at the next step size to use based on
  // the supplied error norm. Watch out for NaN. Further adjustments will be
  // made in blocks of code that follow.
  if (isnan(err) || isinf(err)) {  // e.g., integrand returned NaN.
    new_step_size = kMinShrink * step_taken;
    return std::make_pair(false, new_step_size);
  } else {
    if (err == 0) {  // A "perfect" step; can happen if no dofs for example.
      new_step_size = kMaxGrow * step_taken;
    } else {  // Choose best step for skating just below the desired accuracy.
      new_step_size = kSafety * step_taken *
                      pow(get_accuracy_in_use() / err, 1.0 / err_order);
    }
  }

  // Error indicates that the step size can be increased.
  if (new_step_size > step_taken) {
    // If the integrator has been directed down to the minimum step size, but
    // now error indicates that the step size can be increased, de-activate
    // at_minimum_step_size.
    *at_minimum_step_size = false;

    // If the new step is bigger than the old, don't make the change if the
    // old one was small for some unimportant reason (like reached a publishing
    // interval). Also, don't grow the step size if the change would be very
    // small; better to keep the step size stable in that case (maybe just
    // for aesthetic reasons).
    if (new_step_size < kHysteresisHigh * step_taken)
      new_step_size = step_taken;
  }

  // If error indicates that we should shrink the step size but are not allowed
  // to, quit and indicate that the step was successful.
  if (new_step_size < step_taken && *at_minimum_step_size) {
    return std::make_pair(true, step_taken);
  }

  // If we're supposed to shrink the step size but the one we have actually
  // achieved the desired accuracy last time, we won't change the step now.
  // Otherwise, if we are going to shrink the step, let's not be shy -- we'll
  // shrink it by at least a factor of kHysteresisLow.
  if (new_step_size < step_taken) {
    if (err <= get_accuracy_in_use()) {
      new_step_size = step_taken;  // not this time
    } else {
      T test_value = kHysteresisLow * step_taken;
      new_step_size = min(new_step_size, test_value);
    }
  }

  // Keep the size change within the allowable bounds.
  T max_grow_step = kMaxGrow * step_taken;
  T min_shrink_step = kMinShrink * step_taken;
  new_step_size = min(new_step_size, max_grow_step);
  new_step_size = max(new_step_size, min_shrink_step);

  // Apply user-requested limits on min and max step size.
  // TODO(edrumwri): Introduce some feedback to the user when integrator wants
  // to take a smaller step than user has selected as the minimum. Options for
  // this feedback could include throwing a special exception, logging, setting
  // a flag in the integrator that allows throwing an exception, or returning
  // a special status from IntegrateNoFurtherThanTime().
  if (!isnan(get_maximum_step_size()))
    new_step_size = min(new_step_size, get_maximum_step_size());
  ValidateSmallerStepSize(step_taken, new_step_size);

  // Increase the next step size, as necessary.
  new_step_size = max(new_step_size, get_working_minimum_step_size());
  if (new_step_size == get_working_minimum_step_size()) {
    // Indicate that the integrator is now trying the minimum step size.
    *at_minimum_step_size = true;

    // If the integrator wants to shrink the step size below the
    // minimum allowed and exceptions are suppressed, indicate that status.
    if (new_step_size < step_taken)
      return std::make_pair(false, new_step_size);
  }

  return std::make_pair(
      static_cast<bool>(new_step_size >= step_taken),
      new_step_size);
}
// Integrates forward from the current context time, stopping at the earliest
// of the next publish time, next update time, the boundary time, or the
// maximum step size, and reports which of those constrained the step.
template <class T>
typename IntegratorBase<T>::StepResult
IntegratorBase<T>::IntegrateNoFurtherThanTime(
    const T& publish_time, const T& update_time, const T& boundary_time) {
  if (!IntegratorBase<T>::is_initialized())
    throw std::logic_error("Integrator not initialized.");

  // Now that integrator has been checked for initialization, get the current
  // time.
  const T t0 = context_->get_time();

  // Verify that h's are non-negative.
  const T publish_dt = publish_time - t0;
  const T update_dt = update_time - t0;
  const T boundary_dt = boundary_time - t0;
  if (publish_dt < 0.0)
    throw std::logic_error("Publish h is negative.");
  if (update_dt < 0.0)
    throw std::logic_error("Update h is negative.");
  if (boundary_dt < 0.0)
    throw std::logic_error("Boundary h is negative.");

  // The size of the integration step is the minimum of the time until the next
  // update event, the time until the next publish event, the boundary time
  // (i.e., the maximum time that the user wished to step to), and the maximum
  // step size (which may stretch slightly to hit a discrete event).

  // We report to the caller which event ultimately constrained the step size.
  // If multiple events constrained it equally, we prefer to report update
  // events over publish events, publish events over boundary step limits,
  // and boundary limits over maximum step size limits. The caller must
  // determine event simultaneity by inspecting the time.

  // The maintainer of this code is advised to consider that, while updates
  // and boundary times, may both conceptually be deemed events, the distinction
  // is made for a reason. If both an update and a boundary time occur
  // simultaneously, the following behavior should result:
  // (1) kReachedUpdateTime is returned, (2) Simulator::AdvanceTo() performs the
  // necessary update, (3) IntegrateNoFurtherThanTime() is called with
  // boundary_time equal to the current time in the context and returns
  // kReachedBoundaryTime, and (4) the simulation terminates. This sequence of
  // operations will ensure that the simulation state is valid if
  // Simulator::AdvanceTo() is called again to advance time further.

  // We now analyze the following simultaneous cases with respect to Simulator:
  //
  // { publish, update }
  // kReachedUpdateTime will be returned, an update will be followed by a
  // publish.
  //
  // { publish, update, max step }
  // kReachedUpdateTime will be returned, an update will be followed by a
  // publish.
  //
  // { publish, boundary time, max step }
  // kReachedPublishTime will be returned, a publish will be performed followed
  // by another call to this function, which should return kReachedBoundaryTime
  // (followed in rapid succession by AdvanceTo(.) return).
  //
  // { update, boundary time, max step }
  // kReachedUpdateTime will be returned, an update will be performed followed
  // by another call to this function, which should return kReachedBoundaryTime
  // (followed in rapid succession by AdvanceTo(.) return).
  //
  // { publish, update, boundary time, maximum step size }
  // kUpdateTimeReached will be returned, an update followed by a publish
  // will then be performed followed by another call to this function, which
  // should return kReachedBoundaryTime (followed in rapid succession by
  // AdvanceTo(.) return).

  // By default, the target time is that of the next discrete update event.
  StepResult candidate_result = IntegratorBase<T>::kReachedUpdateTime;
  T target_time = update_time;

  // If the next discrete publish event is sooner than the next discrete update
  // event, the time of the publish event becomes the target time.
  if (publish_time < update_time) {
    candidate_result = IntegratorBase<T>::kReachedPublishTime;
    target_time = publish_time;
  }

  // If the stop time (boundary time) is sooner than the candidate, use it
  // instead.
  if (boundary_time < target_time) {
    candidate_result = IntegratorBase<T>::kReachedBoundaryTime;
    target_time = boundary_time;
  }

  // If there is no continuous state, there will be no need to limit the
  // integration step size.
  if (get_context().num_continuous_states() == 0) {
    Context<T>* context = get_mutable_context();
    context->SetTime(target_time);
    return candidate_result;
  }

  // If all events are further into the future than the maximum step
  // size times a stretch factor of 1.01, the maximum time becomes the
  // target time. Put another way, if the maximum step occurs right before
  // an update or a publish, the update or publish is done instead. In contrast,
  // we never step past boundary_time, even if doing so would allow hitting a
  // publish or an update.
  const bool reached_boundary =
      (candidate_result == IntegratorBase<T>::kReachedBoundaryTime);
  const T& max_h = this->get_maximum_step_size();
  const T max_integrator_time = t0 + max_h;
  if ((reached_boundary && max_integrator_time < target_time) ||
      (!reached_boundary && t0 + max_h * get_stretch_factor() < target_time)) {
    candidate_result = IntegratorBase<T>::kTimeHasAdvanced;
    target_time = max_integrator_time;
  }

  T h = target_time - t0;
  if (h < 0.0) throw std::logic_error("Negative h.");

  // If error control is disabled, call the generic stepper. Otherwise, use
  // the error controlled method.
  bool full_step = true;
  if (this->get_fixed_step_mode()) {
    T adjusted_h = h;
    while (!Step(adjusted_h)) {
      ++num_shrinkages_from_substep_failures_;
      ++num_substep_failures_;
      adjusted_h *= subdivision_factor_;
      ValidateSmallerStepSize(h, adjusted_h);
      full_step = false;
    }
  } else {
    full_step = StepOnceErrorControlledAtMost(h);
  }

  // Update generic statistics.
  const T actual_h = context_->get_time() - t0;
  UpdateStepStatistics(actual_h);

  if (full_step || context_->get_time() >= target_time) {
    // Correct any rounding error that may have caused the time to overrun
    // the target time.
    context_->SetTime(target_time);

    // If the integrator took the entire maximum step size we allowed above,
    // we report to the caller that a step constraint was hit, which may
    // indicate a discrete event has arrived.
    return candidate_result;
  } else {
    // Otherwise, we expect that time has advanced, but no event has arrived.
    return IntegratorBase<T>::kTimeHasAdvanced;
  }
}
} // namespace systems
} // namespace drake
DRAKE_DEFINE_CLASS_TEMPLATE_INSTANTIATIONS_ON_DEFAULT_SCALARS(
class drake::systems::IntegratorBase)
| 0 |
/home/johnshepherd/drake/systems | /home/johnshepherd/drake/systems/analysis/simulator_status.cc | #include "drake/systems/analysis/simulator_status.h"
#include <string>
#include <utility>
#include <fmt/format.h>
namespace drake {
namespace systems {
/** Produces a human-readable explanation of why the simulation returned. */
std::string SimulatorStatus::FormatMessage() const {
  // Normal completion: the simulation ran all the way to the boundary time.
  if (reason() == kReachedBoundaryTime) {
    DRAKE_DEMAND(return_time() == boundary_time());
    return fmt::format(
        "Simulator successfully reached the boundary time ({}).",
        boundary_time());
  }

  // Otherwise this was an early return. Equality is unlikely but allowed in
  // case a termination request happens at exactly the boundary time.
  DRAKE_DEMAND(return_time() <= boundary_time());

  // Identify the relevant subsystem in human-readable terms. When no
  // subsystem was provided we just say "System"; otherwise we combine its
  // type and name, e.g. "MultibodyPlant<double> System 'my_plant'".
  std::string system_id{"System"};
  if (system() != nullptr) {
    system_id = fmt::format(
        "{} System '{}'",
        NiceTypeName::RemoveNamespaces(system()->GetSystemType()),
        system()->GetSystemPathname());
  }

  if (reason() == kReachedTerminationCondition) {
    return fmt::format(
        "Simulator returned early at time {} because {} requested termination "
        "with message: \"{}\"",
        return_time(), system_id, message());
  }

  // The only remaining possibility is that an event handler failed.
  DRAKE_DEMAND(reason() == kEventHandlerFailed);
  return fmt::format(
      "Simulator stopped at time {} because {} failed "
      "with message: \"{}\"",
      return_time(), system_id, message());
}
} // namespace systems
} // namespace drake
| 0 |
/home/johnshepherd/drake/systems | /home/johnshepherd/drake/systems/analysis/instantaneous_realtime_rate_calculator.h | #pragma once
#include <memory>
#include <optional>
#include "drake/common/timer.h"
namespace drake {
namespace systems {
namespace internal {
/* Utility class that computes the realtime rate achieved between time steps,
i.e., the ratio of elapsed simulated time to elapsed wall-clock time between
consecutive calls to UpdateAndRecalculate(). */
class InstantaneousRealtimeRateCalculator {
 public:
  DRAKE_NO_COPY_NO_MOVE_NO_ASSIGN(InstantaneousRealtimeRateCalculator);
  InstantaneousRealtimeRateCalculator() = default;

  /* Computes the realtime rate which is the ratio of the amount of simulator
  time to real world time that has passed between invocations.
  The very first call to this function seeds the rate calculation and returns
  nullopt because a valid rate cannot be computed yet. It will also return
  nullopt if sim_time goes backwards.
  @param current_sim_time the current simulated time.
  @return realtime rate if one can be calculated, nullopt otherwise.
  */
  std::optional<double> UpdateAndRecalculate(double current_sim_time);

  /* Resets the internal state of `this` rate calculator. After a call, the next
  call to UpdateAndRecalculate() will re-seed the rate calculation as if it was
  the first call.
  */
  void Reset() { prev_sim_time_ = std::nullopt; }

  /* (Internal use for unit testing only) Used to mock the monotonic wall time
  source to control time during unit testing. */
#ifndef DRAKE_DOXYGEN_CXX
  void InjectMockTimer(std::unique_ptr<Timer>);
#endif

 private:
  // Simulated time seen at the previous UpdateAndRecalculate() call; nullopt
  // until the calculation has been seeded (and again after Reset()).
  std::optional<double> prev_sim_time_;
  // Wall-clock time source; a SteadyTimer by default, replaceable for unit
  // testing via InjectMockTimer().
  std::unique_ptr<Timer> timer_{std::make_unique<SteadyTimer>()};
};
} // namespace internal
} // namespace systems
} // namespace drake
| 0 |
/home/johnshepherd/drake/systems | /home/johnshepherd/drake/systems/analysis/simulator_config_functions.cc | #include "drake/systems/analysis/simulator_config_functions.h"
#include <cctype>
#include <initializer_list>
#include <memory>
#include <stdexcept>
#include <utility>
#include "drake/common/drake_throw.h"
#include "drake/common/never_destroyed.h"
#include "drake/common/nice_type_name.h"
#include "drake/common/unused.h"
#include "drake/systems/analysis/bogacki_shampine3_integrator.h"
#include "drake/systems/analysis/explicit_euler_integrator.h"
#include "drake/systems/analysis/implicit_euler_integrator.h"
#include "drake/systems/analysis/radau_integrator.h"
#include "drake/systems/analysis/runge_kutta2_integrator.h"
#include "drake/systems/analysis/runge_kutta3_integrator.h"
#include "drake/systems/analysis/runge_kutta5_integrator.h"
#include "drake/systems/analysis/semi_explicit_euler_integrator.h"
#include "drake/systems/analysis/velocity_implicit_euler_integrator.h"
#include "drake/systems/framework/leaf_system.h"
namespace drake {
namespace systems {
namespace {
using std::function;
using std::pair;
using std::string;
using std::vector;
// A functor that implements ResetIntegrator.
template <typename T>
using ResetIntegratorFunc =
function<IntegratorBase<T>*(Simulator<T>*, const T& /* max_step_size */)>;
// Returns (scheme, functor) pair that implements ResetIntegrator.
template <typename T>
using NamedResetIntegratorFunc =
pair<string, ResetIntegratorFunc<T>>;
// Converts the class name of the `Integrator` template argument into a string
// name for the scheme, e.g., FooBarIntegrator<double> becomes "foo_bar".
template <template <typename> class Integrator>
string GetIntegratorName() {
  // Get the class name, e.g., FooBarIntegrator<double>.
  string full_name = NiceTypeName::Get<Integrator<double>>();
  string class_name = NiceTypeName::RemoveNamespaces(full_name);
  // The Radau integrator carries its stage count as a second template
  // argument; map each instantiation onto its conventional alias name.
  if (class_name == "RadauIntegrator<double,1>") {
    class_name = "Radau1Integrator<double>";
  } else if (class_name == "RadauIntegrator<double,2>") {
    class_name = "Radau3Integrator<double>";
  }

  // Strip off "Integrator<double>" suffix to leave just "FooBar".
  const string suffix = "Integrator<double>";
  DRAKE_DEMAND(class_name.size() > suffix.size());
  const size_t suffix_begin = class_name.size() - suffix.size();
  DRAKE_DEMAND(class_name.substr(suffix_begin) == suffix);
  const string camel_name = class_name.substr(0, suffix_begin);

  // Convert "FooBar to "foo_bar".
  string result;
  for (char ch : camel_name) {
    // The <cctype> classifiers require a value representable as unsigned
    // char; passing a plain (possibly negative) char is undefined behavior,
    // so cast explicitly. Likewise, std::tolower returns int and must be
    // narrowed back to char deliberately.
    const unsigned char uch = static_cast<unsigned char>(ch);
    if (std::isupper(uch)) {
      if (!result.empty()) { result.push_back('_'); }
      result.push_back(static_cast<char>(std::tolower(uch)));
    } else {
      result.push_back(ch);
    }
  }
  return result;
}
// A hollow shell of a System, used below as a stand-in diagram for a probe
// Simulator. TODO(jeremy.nimmer) Move into drake primitives.
template <typename T>
class DummySystem final : public LeafSystem<T> {
 public:
  DummySystem() {}
};
// Looks up the scheme name matching `integrator` by probing every known
// scheme against a throwaway simulator and comparing integrator types.
// N.B. In a roundabout way, the string returned here is generated by
// GetIntegratorName().
template <typename T>
string GetIntegrationSchemeName(const IntegratorBase<T>& integrator) {
  const string target_type = NiceTypeName::Get(integrator);
  Simulator<T> probe(std::make_unique<DummySystem<T>>());
  for (const auto& candidate : GetIntegrationSchemes()) {
    ResetIntegratorFromFlags(&probe, candidate, T(0.001));
    const string probe_type = NiceTypeName::Get(probe.get_integrator());
    if (probe_type == target_type) {
      return candidate;
    }
  }
  throw std::runtime_error(
      "Unrecognized integration scheme " + target_type);
}
// Returns (scheme, functor) pair to implement reset for this `Integrator`.
// This would be much simpler if all integrators accepted a max_step_size.
template <typename T, template <typename> class Integrator>
NamedResetIntegratorFunc<T> MakeResetter() {
  // Distinguish the two integrator constructor idioms by signature:
  // fixed-step integrators take (system, max_step_size, context) while
  // error-controlled ones take only (system, context).
  constexpr bool is_fixed_step = std::is_constructible_v<
      Integrator<T>,
      const System<T>&, T, Context<T>*>;
  constexpr bool is_error_controlled = std::is_constructible_v<
      Integrator<T>,
      const System<T>&, Context<T>*>;
  // Exactly one of the two idioms must apply.
  static_assert(is_fixed_step ^ is_error_controlled);
  return NamedResetIntegratorFunc<T>(
      GetIntegratorName<Integrator>(),
      [](Simulator<T>* simulator, const T& max_step_size) {
        if constexpr (is_fixed_step) {
          // Fixed-step: the maximum step size is a constructor argument.
          IntegratorBase<T>& result =
              simulator->template reset_integrator<Integrator<T>>(
                  max_step_size);
          return &result;
        } else {
          // Error-controlled: construct first, then set the maximum step.
          IntegratorBase<T>& result =
              simulator->template reset_integrator<Integrator<T>>();
          result.set_maximum_step_size(max_step_size);
          return &result;
        }
      });
}
// Returns the full list of supported (scheme, functor) pairs. N.B. The list
// here must be kept in sync with the help string in simulator_gflags.cc.
// The list is built once and cached (never_destroyed avoids static
// destruction-order issues at shutdown).
template <typename T>
const vector<NamedResetIntegratorFunc<T>>& GetAllNamedResetIntegratorFuncs() {
  static const never_destroyed<vector<NamedResetIntegratorFunc<T>>> result{
      std::initializer_list<NamedResetIntegratorFunc<T>>{
          // Keep this list sorted alphabetically.
          MakeResetter<T, BogackiShampine3Integrator>(),
          MakeResetter<T, ExplicitEulerIntegrator>(),
          MakeResetter<T, ImplicitEulerIntegrator>(),
          MakeResetter<T, Radau1Integrator>(),
          MakeResetter<T, Radau3Integrator>(),
          MakeResetter<T, RungeKutta2Integrator>(),
          MakeResetter<T, RungeKutta3Integrator>(),
          MakeResetter<T, RungeKutta5Integrator>(),
          MakeResetter<T, SemiExplicitEulerIntegrator>(),
          MakeResetter<T, VelocityImplicitEulerIntegrator>(),
      }};
  return result.access();
}
} // namespace
// Replaces the simulator's integrator with the named scheme, configured with
// the given maximum step size. Throws if the scheme name is unknown.
template <typename T>
IntegratorBase<T>& ResetIntegratorFromFlags(
    Simulator<T>* simulator,
    const string& scheme,
    const T& max_step_size) {
  DRAKE_THROW_UNLESS(simulator != nullptr);
  // Scan the registry for a matching scheme name and delegate to its functor.
  for (const auto& entry : GetAllNamedResetIntegratorFuncs<T>()) {
    if (entry.first == scheme) {
      return *entry.second(simulator, max_step_size);
    }
  }
  throw std::runtime_error(fmt::format(
      "Unknown integration scheme: {}", scheme));
}
// Returns the names of all supported integration schemes, computed once from
// the registry and cached for the life of the process.
const vector<string>& GetIntegrationSchemes() {
  static const never_destroyed<vector<string>> schemes{[]() {
    const auto& registry = GetAllNamedResetIntegratorFuncs<double>();
    vector<string> names;
    names.reserve(registry.size());
    for (const auto& entry : registry) {
      names.push_back(entry.first);
    }
    return names;
  }()};
  return schemes.access();
}
// Applies every field of `config` to `simulator`: installs a fresh integrator
// for the requested scheme, then configures error control, accuracy, realtime
// rate, and publication behavior.
template <typename T>
void ApplySimulatorConfig(
    const SimulatorConfig& config,
    Simulator<T>* simulator) {
  DRAKE_THROW_UNLESS(simulator != nullptr);
  IntegratorBase<T>& integrator = ResetIntegratorFromFlags(
      simulator, config.integration_scheme, T(config.max_step_size));
  // Fixed-step mode can only be toggled on integrators that actually provide
  // an error estimate; others are implicitly fixed-step already.
  if (integrator.supports_error_estimation()) {
    integrator.set_fixed_step_mode(!config.use_error_control);
  }
  // Accuracy is only meaningful when error control is active.
  if (!integrator.get_fixed_step_mode()) {
    integrator.set_target_accuracy(config.accuracy);
  }
  simulator->set_target_realtime_rate(config.target_realtime_rate);
  // It is almost always the case we want these two next flags to be either both
  // true or both false. Otherwise we could miss the first publish at t = 0.
  simulator->set_publish_at_initialization(config.publish_every_time_step);
  simulator->set_publish_every_time_step(config.publish_every_time_step);
}
// Reads the simulator's current settings back into a SimulatorConfig; this is
// the inverse of ApplySimulatorConfig.
template <typename T>
SimulatorConfig ExtractSimulatorConfig(const Simulator<T>& simulator) {
  SimulatorConfig result;
  const IntegratorBase<T>& integrator = simulator.get_integrator();
  result.integration_scheme = GetIntegrationSchemeName(integrator);
  result.max_step_size =
      ExtractDoubleOrThrow(integrator.get_maximum_step_size());
  if (integrator.supports_error_estimation()) {
    result.use_error_control = !integrator.get_fixed_step_mode();
    const double accuracy_in_use =
        ExtractDoubleOrThrow(integrator.get_accuracy_in_use());
    DRAKE_DEMAND(!std::isnan(accuracy_in_use));
    result.accuracy = accuracy_in_use;
  } else {
    // Integrators without error estimation have no meaningful accuracy; use
    // sentinel values consistent with "error control off".
    result.use_error_control = false;
    result.accuracy = 0.0;
  }
  result.target_realtime_rate =
      ExtractDoubleOrThrow(simulator.get_target_realtime_rate());
  result.publish_every_time_step = simulator.get_publish_every_time_step();
  return result;
}
// We can't support T=symbolic::Expression because Simulator doesn't support it.
DRAKE_DEFINE_FUNCTION_TEMPLATE_INSTANTIATIONS_ON_DEFAULT_NONSYMBOLIC_SCALARS((
&ResetIntegratorFromFlags<T>,
&ApplySimulatorConfig<T>,
&ExtractSimulatorConfig<T>
))
} // namespace systems
} // namespace drake
| 0 |
/home/johnshepherd/drake/systems | /home/johnshepherd/drake/systems/analysis/scalar_view_dense_output.cc | #include "drake/systems/analysis/scalar_view_dense_output.h"
DRAKE_DEFINE_CLASS_TEMPLATE_INSTANTIATIONS_ON_DEFAULT_SCALARS(
class drake::systems::ScalarViewDenseOutput)
| 0 |
/home/johnshepherd/drake/systems | /home/johnshepherd/drake/systems/analysis/radau_integrator.h | #pragma once
#include <memory>
#include <vector>
#include "drake/common/autodiff.h"
#include "drake/common/drake_copyable.h"
#include "drake/systems/analysis/bogacki_shampine3_integrator.h"
#include "drake/systems/analysis/implicit_integrator.h"
#include "drake/systems/analysis/runge_kutta2_integrator.h"
namespace drake {
namespace systems {
/**
* A selectable order (third- or first-order), fully implicit integrator with
* error estimation.
*
* @tparam_nonsymbolic_scalar
* @tparam num_stages The number of stages used in this integrator, which must
* be either 1 or 2. Set this to 1 for the integrator to be implicit
* Euler and 2 for it to be Radau3 (default).
*
* A two-stage Radau IIa (see [Hairer, 1996], Ch. 5) method is used for
* propagating the state forward, by default. The state can also be propagated
* using a single-stage method, in which case it is equivalent to an implicit
* Euler method, by setting num_stages=1. Regardless of the order of propagating
* state, the local (truncation) error is estimated through the implicit
* trapezoid rule.
*
* Radau IIa methods are known to be L-Stable, meaning both that
* applying it at a fixed integration step to the "test" equation `y(t) = eᵏᵗ`
* yields zero (for `k < 0` and `t → ∞`) *and* that it is also A-Stable.
* A-Stability, in turn, means that the method can integrate the linear constant
* coefficient system `dx/dt = Ax` at any step size without the solution
* becoming unstable (growing without bound). The practical effect of
* L-Stability is that the integrator tends to be stable for any given step size
* on an arbitrary system of ordinary differential equations. Note that the
* implicit trapezoid rule used for error estimation is "only" A-Stable; whether
* this lesser stability has some practical effect on the efficiency of this
* integrator is currently unknown. See [Lambert, 1991], Ch. 6 for an
* approachable discussion on stiff differential equations and L- and
* A-Stability.
*
* This implementation uses Newton-Raphson (NR). General implementation
* details were taken from [Hairer, 1996] Ch. 8.
*
* - [Hairer, 1996] E. Hairer and G. Wanner. Solving Ordinary Differential
* Equations II (Stiff and Differential-Algebraic Problems).
* Springer, 1996.
* - [Lambert, 1991] J. D. Lambert. Numerical Methods for Ordinary Differential
* Equations. John Wiley & Sons, 1991.
*
* @see ImplicitIntegrator class documentation for information about implicit
* integration methods in general.
* @see Radau3Integrator and Radau1Integrator aliases for third- and first-order
* templates with num_stages already specified.
* @note This integrator uses the integrator accuracy setting, even when run
* in fixed-step mode, to limit the error in the underlying Newton-Raphson
* process. See IntegratorBase::set_target_accuracy() for more info.
* @ingroup integrators
*/
template <typename T, int num_stages = 2>
class RadauIntegrator final : public ImplicitIntegrator<T> {
  static_assert(num_stages == 1 || num_stages == 2,
                "Only 1-stage and 2-stage Radau are supported.");

 public:
  DRAKE_NO_COPY_NO_MOVE_NO_ASSIGN(RadauIntegrator)

  explicit RadauIntegrator(const System<T>& system,
                           Context<T>* context = nullptr);
  ~RadauIntegrator() final = default;

  bool supports_error_estimation() const final { return true; }

  /// This integrator uses embedded second order methods to compute estimates of
  /// the local truncation error. The order of the asymptotic difference between
  /// the third-order Radau method and an embedded second order method is O(h³).
  /// The order of the asymptotic difference between the first-order Radau
  /// method and an embedded second order method is O(h²).
  int get_error_estimate_order() const final {
    if (num_stages == 2) {
      return 3;
    } else {
      DRAKE_DEMAND(num_stages == 1);
      return 2;
    }
  }

 private:
  // Statistics accessors required by the ImplicitIntegrator interface.
  int64_t do_get_num_newton_raphson_iterations() const final {
    return num_nr_iterations_;
  }

  int64_t do_get_num_error_estimator_derivative_evaluations() const final {
    return num_err_est_function_evaluations_;
  }

  int64_t do_get_num_error_estimator_derivative_evaluations_for_jacobian()
      const final {
    return num_err_est_jacobian_function_evaluations_;
  }

  int64_t do_get_num_error_estimator_newton_raphson_iterations()
      const final {
    return num_err_est_nr_iterations_;
  }

  int64_t do_get_num_error_estimator_jacobian_evaluations() const final {
    return num_err_est_jacobian_reforms_;
  }

  int64_t do_get_num_error_estimator_iteration_matrix_factorizations()
      const final {
    return num_err_est_iter_factorizations_;
  }

  // Computes the solution xtplus (i.e., the continuous state at t0 + h) from
  // the continuous state at t0 (xt0) and the current Newton-Raphson
  // iterate (Z).
  void ComputeSolutionFromIterate(
      const VectorX<T>& xt0, const VectorX<T>& Z, VectorX<T>* xtplus) const;

  // Updates the error estimate from the propagated solution and the embedded
  // solution.
  void ComputeAndSetErrorEstimate(
      const VectorX<T>& xtplus_prop, const VectorX<T>& xtplus_embed);

  // Steps Radau forward by h, if possible.
  // @param t0 the initial time.
  // @param h the integration step size to attempt.
  // @param xt0 the continuous state at time t0.
  // @param[out] xtplus_radau contains the Radau integrator solution on return.
  // @param[out] xtplus_itr contains the implicit trapezoid solution on return.
  // @returns `true` if the integration was successful at the requested step
  //          size.
  // @pre The time and state in the system's context (stored by the integrator)
  //      are set to (t0, xt0) on entry.
  // @post The time and state of the system's context (stored by the
  //       integrator) will be set to t0+h and `xtplus_radau` on successful
  //       exit (indicated by this function returning `true`) and will be
  //       indeterminate on unsuccessful exit (indicated by this function
  //       returning `false`).
  bool AttemptStepPaired(const T& t0, const T& h, const VectorX<T>& xt0,
                         VectorX<T>* xtplus_radau, VectorX<T>* xtplus_itr);

  // Computes F(Z) used in [Hairer, 1996], (IV.8.4). This method evaluates
  // the time derivatives of the system given the current iterate Z.
  // @param t0 the initial time.
  // @param h the integration step size to attempt.
  // @param xt0 the continuous state at time t0.
  // @param Z the current iterate, of dimension state_dim * num_stages.
  // @post the state of the internal context will be set to (t0, xt0) on return.
  // @return a (state_dim * num_stages)-dimensional vector.
  const VectorX<T>& ComputeFofZ(
      const T& t0, const T& h, const VectorX<T>& xt0, const VectorX<T>& Z);

  void DoInitialize() final;

  // Discards both iteration matrices so that they will be recomputed from a
  // fresh Jacobian on the next step attempt.
  void DoResetCachedJacobianRelatedMatrices() final {
    iteration_matrix_radau_ = {};
    iteration_matrix_implicit_trapezoid_ = {};
  }

  void DoResetImplicitIntegratorStatistics() final;

  // Takes a given step of the requested size, if possible.
  // @param h the integration step size to attempt.
  // @returns `true` if successful.
  // @post the time and continuous state will be advanced only if `true` is
  //       returned (if `false` is returned, the time and state will be reset
  //       to their values on entry).
  bool DoImplicitIntegratorStep(const T& h) final;

  // Computes the next continuous state (at t0 + h) using the Radau method,
  // assuming that the method is able to converge at that step size.
  // @param t0 the initial time.
  // @param h the integration step size to attempt.
  // @param xt0 the continuous state at time t0.
  // @param[out] xtplus the value for x(t+h) on return.
  // @param trial the attempt for this approach (1-4). StepRadau() uses more
  //        computationally expensive methods as the trial numbers increase.
  // @post the internal context will be in an indeterminate state on returning
  //       `false`.
  // @returns `true` if the method was successfully able to take an integration
  //          step of size `h`.
  bool StepRadau(const T& t0, const T& h, const VectorX<T>& xt0,
                 VectorX<T>* xtplus, int trial = 1);

  // Computes the next continuous state (at t0 + h) using the implicit trapezoid
  // method, assuming that the method is able to converge at that step size.
  // @param t0 the initial time.
  // @param h the integration step size to attempt.
  // @param xt0 the continuous state at time t0.
  // @param dx0 the time derivatives computed at time and state (t0, xt0).
  // @param xtplus_radau the Radau solution for x(t+h).
  // @param[out] xtplus the value for x(t+h) on return.
  // @returns `true` if the method was successfully able to take an integration
  //          step of size `h`.
  bool StepImplicitTrapezoid(const T& t0, const T& h, const VectorX<T>& xt0,
                             const VectorX<T>& dx0,
                             const VectorX<T>& xtplus_radau,
                             VectorX<T>* xtplus);

  // Computes the tensor product between two matrices. Given
  // A = | a11 ... a1m |
  //     | ...     ... |
  //     | an1 ... anm |
  // and some matrix B, the tensor product is:
  // A ⊗ B = | a11B ... a1mB |
  //         | ...      ...  |
  //         | an1B ... anmB |
  static MatrixX<T> CalcTensorProduct(const MatrixX<T>& A, const MatrixX<T>& B);

  // Function for computing the iteration matrix for the implicit trapezoid
  // method (used for the embedded error estimate).
  static void ComputeImplicitTrapezoidIterationMatrix(const MatrixX<T>& J,
      const T& h,
      typename ImplicitIntegrator<T>::IterationMatrix* iteration_matrix);

  // Function for computing the iteration matrix for the Radau method. This
  // is the matrix in [Hairer, 1996] (IV.8.4) on p.119.
  static void ComputeRadauIterationMatrix(const MatrixX<T>& J, const T& h,
      const MatrixX<double>& A,
      typename ImplicitIntegrator<T>::IterationMatrix* iteration_matrix);

  // Does all of the real work for the implicit trapezoid method.
  bool StepImplicitTrapezoidDetail(const T& t0, const T& h,
      const VectorX<T>& xt0, const std::function<VectorX<T>()>& g,
      const VectorX<T>& xtplus_radau, VectorX<T>* xtplus, int trial = 1);

  // The num_stages-dimensional (constant) vector of time-scaling coefficients
  // common to Runge-Kutta-type integrators.
  std::vector<double> c_;

  // The num_stages x num_stages-dimensional (constant) matrix of stage-scaling
  // coefficients that are standard with Runge-Kutta-type integrators.
  MatrixX<double> A_;

  // The iteration matrix for the Radau method.
  typename ImplicitIntegrator<T>::IterationMatrix iteration_matrix_radau_;

  // The iteration matrix for the implicit trapezoid method.
  typename ImplicitIntegrator<T>::IterationMatrix
      iteration_matrix_implicit_trapezoid_;

  // The (constant) tensor product between A_ and an identity matrix. This
  // product is computed only at initialization.
  MatrixX<T> A_tp_eye_;

  // The num_stages-dimensional (constant) solution propagation coefficients
  // (that also scales the stages) common to Runge-Kutta-type integrators.
  std::vector<double> b_;

  // The num_stages-dimensional (constant) scaling coefficients for Z (IV.8.2b)
  // in [Hairer, 1996].
  std::vector<double> d_;

  // A num_stages * |xc|-dimensional vector of the current iterate for the
  // Newton-Raphson process.
  VectorX<T> Z_;

  // The num_stages dimensional vector of derivative evaluations at every stage.
  VectorX<T> F_of_Z_;

  // Vector used in error estimate calculations.
  VectorX<T> err_est_vec_;

  // The continuous state update vector used during Newton-Raphson.
  std::unique_ptr<ContinuousState<T>> dx_state_;

  // Continuous state at the beginning of an integration step (stored to avoid
  // heap allocations).
  VectorX<T> xt0_;

  // Time-derivative of continuous state (stored to avoid heap allocations).
  VectorX<T> xdot_;

  // "Propagated" solution computed by the integrator (stored to avoid heap
  // allocations)- this is the solution that will be propagated forward in time.
  VectorX<T> xtplus_prop_;

  // "Error estimate" solution computed by the embedded method (stored to avoid
  // allocations)- this is the solution that will be used in concert with the
  // propagated solution to compute the error estimate.
  VectorX<T> xtplus_embed_;

  // 3/2 Bogacki-Shampine integrator used for propagation and error estimation
  // when the step size becomes smaller than the working minimum step size.
  std::unique_ptr<BogackiShampine3Integrator<T>> bs3_;

  // Second order Runge-Kutta integrator used for error estimation when the
  // step size becomes smaller than the working minimum step size.
  std::unique_ptr<RungeKutta2Integrator<T>> rk2_;

  // Statistics specific to this integrator.
  int64_t num_nr_iterations_{0};

  // Implicit trapezoid specific statistics.
  int64_t num_err_est_jacobian_reforms_{0};
  int64_t num_err_est_jacobian_function_evaluations_{0};
  int64_t num_err_est_iter_factorizations_{0};
  int64_t num_err_est_function_evaluations_{0};
  int64_t num_err_est_nr_iterations_{0};
};
/** A third-order fully implicit integrator with error estimation.
See RadauIntegrator with `num_stages == 2` for details (the two-stage
Radau IIa method).
@tparam_nonsymbolic_scalar */
template <typename T>
using Radau3Integrator = RadauIntegrator<T, 2>;

/** A first-order fully implicit integrator with error estimation.
See RadauIntegrator with `num_stages == 1` for details (equivalent to
implicit Euler).
@tparam_nonsymbolic_scalar */
template <typename T>
using Radau1Integrator = RadauIntegrator<T, 1>;
} // namespace systems
} // namespace drake
// Declare class template initializations for double and AutoDiffXd.
// Note: We don't use the macros in drake/common/default_scalars.h because
// those macros are designed for functions with only one template argument, and
// we need to instantiate both scalar types for both the Radau1 and Radau3
// integrators, which have num_stages set 1 and 2, respectively.
extern template class drake::systems::RadauIntegrator<double, 1>;
extern template class drake::systems::RadauIntegrator<drake::AutoDiffXd, 1>;
extern template class drake::systems::RadauIntegrator<double, 2>;
extern template class drake::systems::RadauIntegrator<drake::AutoDiffXd, 2>;
| 0 |
/home/johnshepherd/drake/systems | /home/johnshepherd/drake/systems/analysis/integrator_base.h | #pragma once
#include <algorithm>
#include <cmath>
#include <limits>
#include <memory>
#include <utility>
#include <vector>
#include "drake/common/default_scalars.h"
#include "drake/common/drake_assert.h"
#include "drake/common/drake_bool.h"
#include "drake/common/drake_copyable.h"
#include "drake/common/text_logging.h"
#include "drake/common/trajectories/piecewise_polynomial.h"
#include "drake/systems/framework/context.h"
#include "drake/systems/framework/system.h"
#include "drake/systems/framework/vector_base.h"
namespace drake {
namespace systems {
/** @addtogroup simulation
@{
@defgroup integrators Integrators
Apart from solving initial value problems, for which the integrator is a
key component of a simulator, integrators can also be used to solve
boundary value problems (via numerical methods like the Multiple Shooting
Method) and trajectory optimization problems (via numerical methods like
direct transcription). IntegratorBase and its derivatives were developed
primarily toward the former application (through
IntegratorBase::IntegrateNoFurtherThanTime() and the Simulator class).
However, the IntegratorBase architecture was developed to support these
ancillary applications as well using the
IntegratorBase::IntegrateWithMultipleStepsToTime() and
IntegratorBase::IntegrateWithSingleFixedStepToTime() methods; the latter
permits the caller to advance time using fixed steps in applications where
variable stepping would be deleterious (e.g., direct transcription).
@section integrator-selection Integrator selection
A natural question for a user to ask of an integrator is: Which scheme
(method) should be applied to a particular problem? The answer is whichever
one most quickly computes the solution to the desired accuracy! Selecting
an integration scheme for a particular problem is presently an artform. As
examples of some selection criteria: multistep methods (none of which are
currently implemented in Drake) generally work poorly when events (that
require state reinitializations) are common, symplectic methods generally
work well at maintaining stability for large integration steps, and stiff
integrators are often best for computationally stiff systems. If ignorant
as to the characteristics of a particular problem, it is often best to start
with an explicit, Runge-Kutta type method. Statistics collected by the
integrator can help diagnose performance issues and possibly point to the
use of a different integration scheme.
Some systems are known to exhibit "computational stiffness", by which it is
meant that (excessively) small integration steps are necessary for purposes
of stability: in other words, steps must be taken smaller than that
required to achieve a desired accuracy *over a particular interval*.
Thus, the nature of computationally stiff problems is that the solution to
the ODE is *smooth* in the interval of stiffness (in contrast, some problems
possess such high frequency dynamics that very small steps are simply
necessary to capture the solution accurately). Implicit integrators are the
go-to approach for solving computationally stiff problems, but careful
consideration is warranted. Implicit integrators typically require much more
computation than non-implicit (explicit) integrators, stiffness might be an
issue on only a very small time interval, and some problems might be only
"moderately stiff". Put another way, applying an implicit integrator to a
potentially stiff problem might not yield faster computation. The first
chapter of [Hairer, 1996] illustrates the issues broached in this paragraph
using various examples.
@section settings Integrator settings
IntegratorBase provides numerous settings and flags that can leverage
problem-specific information to speed integration and/or improve integration
accuracy. As an example, IntegratorBase::set_maximum_step_size() allows the
user to prevent overly large integration steps (that integration error
control alone might be insufficient to detect). As noted previously,
IntegratorBase also collects a plethora of statistics that can be used to
diagnose poor integration performance. For example, a large number of
shrinkages due to @ref error-estimation-and-control "error control" could
indicate that a system is computationally stiff. **Note that you might need
to alter the default settings to obtain desired performance even though we
have attempted to select reasonable defaults for many problems.**
See settings for @ref integrator-accuracy,
@ref integrator-maxstep "maximum step size",
@ref integrator-minstep "minimum step size", and
@ref weighting-state-errors "weighting state errors" for
in-depth information about the various performance settings shared across
integrators.
@section dense-sampling Dense sampling (interpolation)
For applications that require a more dense sampling of the system
continuous state than what would be available through either fixed or
error-controlled step integration (for a given accuracy), dense output
support is available (through IntegratorBase::StartDenseIntegration() and
IntegratorBase::StopDenseIntegration() methods). The accuracy and performance
of these outputs may vary with each integration scheme implementation.
@section references References
- [Hairer, 1996] E. Hairer and G. Wanner. Solving Ordinary Differential
Equations II (Stiff and Differential-Algebraic Problems).
Springer, 1996.
@}
*/
/**
An abstract class for an integrator for ODEs and DAEs as represented by a
Drake System. Integrators solve initial value problems of the form:<pre>
ẋ(t) = f(t, x(t)) with f : ℝ × ℝⁿ → ℝⁿ
</pre>
(i.e., `f()` is an ordinary differential equation) given initial conditions
(t₀, x₀). Thus, integrators advance the continuous state of a dynamical
system forward in time.
Drake's subclasses of IntegratorBase<T> should follow the naming pattern
`FooIntegrator<T>` by convention.
@tparam_default_scalar
@ingroup integrators
*/
template <class T>
class IntegratorBase {
public:
DRAKE_NO_COPY_NO_MOVE_NO_ASSIGN(IntegratorBase)
/**
Status returned by IntegrateNoFurtherThanTime().
When a step is successful, it will return an indication of what caused it
to stop where it did. When unsuccessful it will throw an exception so you
won't see any return value. When return of control is due ONLY to reaching
a publish time, (status is kReachedPublishTime) the context may return an
interpolated value at an earlier time.
@note the simulation step must always end at an update time but can end
after a publish time.
*/
// TODO(edrumwri): incorporate kReachedZeroCrossing into the simulator.
enum StepResult {
/// Indicates a publish time has been reached but not an update time.
kReachedPublishTime = 1,
/// Localized an event (zero crossing); the context holds the interpolated
/// *before* state.
kReachedZeroCrossing = 2,
/// Indicates that integration terminated at an update time.
kReachedUpdateTime = 3,
/// The user requested return of control whenever an internal step succeeds.
kTimeHasAdvanced = 4,
/// Reached the desired integration time without reaching an update time.
kReachedBoundaryTime = 5,
/// Took the maximum number of steps without finishing integrating over the
/// interval.
kReachedStepLimit = 6,
};
/**
Maintains references to the system being integrated and the context used
to specify the initial conditions for that system (if any).
@param system A reference to the system to be integrated; the integrator
will maintain a reference to the system in perpetuity, so
the integrator must not outlive the system.
@param context A pointer to a writeable context (nullptr is ok, but a
non-null pointer must be set before Initialize() is
called). The integrator will advance the system state using
the pointer to this context. The pointer to the context will
be maintained internally. The integrator must not outlive
the context.
*/
explicit IntegratorBase(const System<T>& system,
Context<T>* context = nullptr)
: system_(system), context_(context) {
initialization_done_ = false;  // Initialize() must be called before stepping.
}
// Virtual destructor: this class is a polymorphic base, so derived
// integrators may be deleted through an IntegratorBase pointer.
virtual ~IntegratorBase() = default;
/**
@anchor integrator-accuracy
@name Methods for getting and setting integrator accuracy
The precise meaning of *accuracy* is a complicated discussion, but it
translates roughly to the number of significant digits you want in the
results. By convention it is supplied as `10^-digits`, meaning that an
accuracy of 1e-3 provides about three significant digits. For more
discussion of accuracy, see @ref accuracy_and_tolerance and ref.
[[1]](https://dx.doi.org/10.1016/j.piutam.2011.04.023).
Integrators vary in the range of accuracy (loosest to tightest) that they
can support, and each integrator will choose a default accuracy to be used
that lies somewhere within this range and attempts to balance computation
and accuracy. If you request accuracy outside the supported range for the
chosen integrator it will be quietly adjusted to be in range. You can find
out the accuracy setting actually being used using `get_accuracy_in_use()`.
Implicit integrators additionally use the accuracy setting for determining
when the underlying Newton-Raphson root finding process has converged. For
those integrators, the accuracy setting also limits the allowable iteration
error in the Newton-Raphson process. Looser accuracy in that process
certainly implies greater error in the ODE solution and might impact the
stability of the solution negatively as well.
- [1] M. Sherman, A. Seth, S. Delp. Procedia IUTAM 2:241-261 (2011),
Section 3.3. https://dx.doi.org/10.1016/j.piutam.2011.04.023
@{
*/
/**
Requests that the integrator attempt to achieve a particular accuracy for
the continuous portions of the simulation; otherwise a default accuracy is
chosen for you. Fixed-step integration ignores this setting, since accuracy
control requires variable step sizes. Call supports_error_estimation()
first to verify that the integrator supports this capability; if it does
not, this method throws.
@throws std::exception if the integrator does not support error
estimation.
*/
// TODO(edrumwri): complain if integrator with error estimation wants to drop
// below the minimum step size
void set_target_accuracy(double accuracy) {
  if (!supports_error_estimation()) {
    throw std::logic_error(
        "Integrator does not support accuracy estimation "
        "and user has requested error control");
  }
  // The accuracy in use starts out identical to the target; derived classes
  // may adjust it to lie within their supported range.
  target_accuracy_ = accuracy;
  accuracy_in_use_ = accuracy;
}
/**
Returns the user-requested target accuracy.
@sa get_accuracy_in_use()
*/
double get_target_accuracy() const {
  return target_accuracy_;
}
/**
Returns the accuracy the integrator is actually using. This may differ from
the target accuracy if, for example, the requested accuracy is unattainable
or not recommended for this particular integrator.
*/
double get_accuracy_in_use() const {
  return accuracy_in_use_;
}
// @}
/**
@anchor error-estimation-and-control
@name Methods related to error estimation and control
Established methods for integrating ordinary differential equations
invariably make provisions for estimating the "local error" (i.e., the
error over a small time interval) of a solution. Although the relationship
between local error and global error (i.e., the accumulated error over
multiple time steps) can be tenuous, such error estimates can allow
integrators to work adaptively, subdividing time intervals as necessary
(if, e.g., the system is particularly dynamic or stationary in an interval).
Even for applications that do not recommend such adaptive integration — like
direct transcription methods for trajectory optimization — error estimation
allows the user to assess the accuracy of the solution.
@{
*/
/**
Derived classes must override this function to indicate whether the
integrator supports error estimation. Without error estimation, the target
accuracy setting (see @ref integrator-accuracy "accuracy settings") will be
unused; methods that configure error control (e.g., set_target_accuracy())
throw when this returns `false`.
*/
virtual bool supports_error_estimation() const = 0;
/**
Derived classes must override this function to return the order of the
asymptotic term in the integrator's error estimate. An error estimator
approximates the truncation error in an integrator's solution. That
truncation error e(.) is approximated by a Taylor Series expansion in the
neighborhood around t:
@verbatim
e(t+h) ≈ e(t) + he'(t) + ½h²e''(t) + ...
       ≈ e(t) + he'(t) + ½h²e''(t) + O(h³)
@endverbatim
where we have replaced the "..." with the asymptotic error of all terms
truncated from the series.
Implementations should return the order of the asymptotic term in the Taylor
Series expansion around the expression for the error. For an integrator
that propagates a second-order solution and provides an estimate of the
error using an embedded first-order method, this method should return "2",
as can be seen in the derivation below, using y* as the true solution:
@verbatim
y̅ = y* + O(h³) [second order solution]
ŷ = y* + O(h²) [embedded first-order method]
e = (y̅ - ŷ) = O(h²)
@endverbatim
If the integrator does not provide an error estimate, the derived class
implementation should return 0.
*/
virtual int get_error_estimate_order() const = 0;
/**
Returns the most recent error estimate (used only for integrators that
support error estimation), or nullptr if this integrator does not support
error estimation.
*/
const ContinuousState<T>* get_error_estimate() const { return err_est_.get(); }
/**
Returns the step size the integrator would like to take next, based
primarily on its accuracy prediction. Integrators that do not support
error estimation never compute this value, so NaN is returned.
*/
const T& get_ideal_next_step_size() const {
  return ideal_next_step_size_;
}
/**
Switches an error-controlled integrator into (or out of) fixed step mode.
In fixed step mode the integrator always takes the maximum step size
directed (which may be the value from get_maximum_step_size(), or smaller,
as directed by, e.g., Simulator for event handling purposes).
@warning The error estimation process remains active (so
get_error_estimate() still returns a correct result), meaning that
the additional (typically, but not necessarily small) computation
required for error estimation is still performed.
@throws std::exception if the integrator does not support error
estimation and @p flag is `false`.
*/
void set_fixed_step_mode(bool flag) {
  // Disabling fixed-step mode requires error estimation support.
  if (!flag && !supports_error_estimation()) {
    throw std::logic_error("Integrator does not support accuracy estimation");
  }
  fixed_step_mode_ = flag;
}
/**
Reports whether the integrator is running in fixed step mode. For an
integrator without error estimation support this always returns `true`.
@sa set_fixed_step_mode()
*/
bool get_fixed_step_mode() const {
  if (!supports_error_estimation()) return true;
  return fixed_step_mode_;
}
// @}
/**
@name Methods for weighting state variable errors \
(in the context of error control)
@anchor weighting-state-errors
@{
This group of methods describes how errors for state variables with
heterogeneous units are weighted in the context of error-controlled
integration. This is an advanced topic and most users can simply specify
desired accuracy and accept the default state variable weights.
A collection of state variables is generally defined in heterogeneous units
(e.g. length, angles, velocities, energy). Some of the state
variables cannot even be expressed in meaningful units, like
quaternions. Certain integrators provide an estimate of the absolute error
made in each state variable during an integration step. These errors must
be properly weighted to obtain an "accuracy" _with respect to each
particular variable_. These per-variable accuracy determinations can be
compared against the user's requirements and used to select an appropriate
size for the next step [Sherman 2011]. The weights are
normally determined automatically using the system's characteristic
dimensions, so *most users can stop reading now!* Custom weighting is
primarily useful for performance improvement; an optimal weighting would
allow an error-controlled integrator to provide the desired level of
accuracy across all state variables without wasting computation
achieving superfluous accuracy for some of those variables.
Users interested in more precise control over state variable weighting may
use the methods in this group to access and modify weighting factors for
individual state variables. Changes to these weights can only be made prior
to integrator initialization or as a result of an event being triggered
and then followed by re-initialization.
_Relative versus absolute accuracy_:
%State variable integration error, as estimated by an integrator, is an
absolute quantity with the same
units as the variable. At each time step we therefore need to determine
an absolute error that would be deemed "good enough", i.e. satisfies
the user's accuracy requirement. If a variable is maintained to a
_relative_ accuracy then that "good enough" value is defined to be the
required accuracy `a` (a fraction like 0.001) times the current value of
the variable, as long as that value
is far from zero. For variables maintained to an *absolute* accuracy, or
relative variables that are at or near zero (where relative accuracy would
be undefined or too strict, respectively), we need a different way to
determine the "good enough" absolute error. The methods in this section
control how that absolute error value is calculated.
_How to choose weights_:
The weight `wᵢ` for a state variable `xᵢ` should be
chosen so that the product `wᵢ * dxᵢ` is unitless, and in particular is 1
when `dxᵢ` represents a "unit effect" of state variable `xᵢ`; that is, the
change in `xᵢ` that produces a unit change in some quantity of interest in
the system being simulated. Why unity (1)? Aside from normalizing the
values, unity "grounds" the weighted error to the user-specified accuracy.
A weighting can be applied individually to each state variable, but
typically it is done approximately by combining the known type of the
variable (e.g. length, angle) with a "characteristic scale" for that
quantity. For example, if a "characteristic length" for the system being
simulated is 0.1 meters, and `x₀` is a length variable measured in meters,
then `w₀` should be 10 so that `w₀*dx₀=1` when `dx₀=0.1`. For angles
representing pointing accuracy (say a camera direction) we typically assume
a "characteristic angle" is one radian (about 60 degrees), so if x₁ is a
pointing direction then w₁=1 is an appropriate weight. We can now scale an
error vector `e=[dx₀ dx₁]` to a unitless fractional error vector
`f=[w₀*dx₀ w₁*dx₁]`. Now to achieve a given accuracy `a`, say `a=.0001`,
we need only check that `|fᵢ|<=a` for each element `i` of `f`. Further,
this gives us a quantitative measure of "worst accuracy" that we can use
to increase or reduce size of the next attempted step, so that we will just
achieve the required accuracy but not much more. We'll be more precise
about this below.
@anchor quasi_coordinates
_Some subtleties for second-order dynamic systems_:
Systems governed by 2nd-order differential equations are typically split
into second order (configuration) variables q, and rate (velocity)
variables v, where the time derivatives qdot of q are linearly related to
v by the kinematic differential equation `qdot = dq/dt = N(q)*v`.
Velocity variables are
chosen to be physically significant, but configuration variables
may be chosen for convenience and do not necessarily have direct physical
interpretation. For examples, quaternions are chosen as a numerically
stable orientation representation. This is problematic for choosing weights
which must be done by physical reasoning
as sketched above. We resolve this by introducing
the notion of "quasi-coordinates" ꝗ (pronounced "qbar") which are defined
by the equation `ꝗdot = dꝗ/dt = v`. Other than time scaling,
quasi-coordinates have the same units as their corresponding velocity
variables. That is, for weighting we need to think
of the configuration coordinates in the same physical space as the velocity
variables; weight those by their physical significance; and then map back
to an instantaneous weighting
on the actual configuration variables q. This mapping is performed
automatically; you need only to be concerned about physical weightings.
Note that generalized quasi-coordinates `ꝗ` can only be defined locally for
a particular configuration `q`. There is in general no meaningful set of
`n` generalized
coordinates which can be differentiated with respect to time to yield `v`.
For example, the Hairy Ball Theorem implies that it is not possible for
three orientation variables to represent all 3D rotations without
singularities, yet three velocity variables can represent angular velocity
in 3D without singularities.
To summarize, separate weights can be provided for each of
- `n` generalized quasi-coordinates `ꝗ` (configuration variables in the
velocity variable space), and
- `nz` miscellaneous continuous state variables `z`.
Weights on the generalized velocity variables `v (= dꝗ/dt)` are derived
directly from the weights on `ꝗ`, weighted by a characteristic time.
Weights on the actual `nq` generalized coordinates can
be calculated efficiently from weights on the quasi-coordinates (details
below).
_How the weights are used_:
The errors in the `ꝗ` and `z` variables are weighted by the diagonal
elements
of diagonal weighting matrices Wꝗ and Wz, respectively. (The block-diagonal
weighting matrix `Wq` on the original generalized coordinates `q` is
calculated from `N` and `Wꝗ`; see below.) In the absence of
other information, the default for all weighting values is one, so `Wꝗ` and
`Wz` are `n × n` and `nz × nz` identity matrices. The weighting matrix `Wv`
for the velocity variables is just `Wv = τ*Wꝗ` where `τ` is a
"characteristic time" for the system, that is, a quantity in time units
that represents a significant evolution of the trajectory. This serves to
control the accuracy with which velocity is determined relative to
configuration. Note that larger values of `τ` are more conservative since
they increase the velocity weights. Typically we use `τ=1.0` or `0.1`
seconds for human-scale mechanical systems.
<!-- TODO(sherm1): provide more guidance for velocity weighting. -->
The weighting matrices `Wq`, `Wv`, and `Wz` are used to compute a weighted
infinity norm as follows. Although `Wv` and `Wz` are constant, the actual
weightings may be state dependent for relative-error calculations.
Define block diagonal error weighting matrix `E=diag(Eq,Ev,Ez)` as follows:
<pre>
Eq = Wq
Ev: Ev(i,i) = { min(Wv(i,i), 1/|vᵢ|) if vᵢ is relative
{ Wv(i,i) if vᵢ is absolute
Ez: Ez(i,i) = { min(Wz(i,i), 1/|zᵢ|) if zᵢ is relative
{ Wz(i,i) if zᵢ is absolute
</pre>
(`Ev` and `Ez` are diagonal.) A `v` or `z` will be maintained to relative
accuracy unless (a) it is "close" to zero (less than 1), or (b) the
variable has been defined as requiring absolute accuracy. Position
variables `q` are always maintained to absolute accuracy (see
[Sherman 2011] for rationale).
Now given an error estimate vector `e=[eq ev ez]`, the vector `f=E*e`
can be considered to provide a unitless fractional error for each of the
state variables. To achieve a given user-specified accuracy `a`, we require
that norm_inf(`f`) <= `a`. That is, no element of `f` can have absolute
value larger than `a`. We also use `f` to determine an ideal next step
size using an appropriate integrator-specific computation.
_Determining weights for q_:
The kinematic differential equations `qdot=N(q)*v` employ an `nq × n`
matrix `N`. By construction, this relationship is invertible using `N`'s
left pseudo-inverse `N⁺` so that `v=N⁺ qdot` and `N⁺ N = I` (the identity
matrix); however, `N N⁺ != I`, as `N` has more rows than columns generally.
[Nikravesh 1988] shows how such a matrix `N` can be determined and provides
more information. Given this relationship between `N` and `N⁺`, we can
relate weighted errors in configuration coordinates `q` to weighted errors
in generalized quasi-coordinates `ꝗ`, as the following derivation shows:
<pre>
v = N⁺ qdot Inverse kinematic differential equation
dꝗ/dt = N⁺ dq/dt Use synonyms for v and qdot
dꝗ = N⁺ dq Change time derivatives to differentials
Wꝗ dꝗ = Wꝗ N⁺ dq Pre-multiply both sides by Wꝗ
N Wꝗ dꝗ = N Wꝗ N⁺ dq Pre-multiply both sides by N
N Wꝗ dꝗ = Wq dq Define Wq := N Wꝗ N⁺
N Wꝗ v = Wq qdot Back to time derivatives.
</pre>
The last two equations show that `Wq` as defined above provides the
expected relationship between the weighted `ꝗ` or `v` variables in velocity
space and the weighted `q` or `qdot` (resp.) variables in configuration
space.
Finally, note that a diagonal entry of one of the weighting matrices can
be set to zero to disable error estimation for that state variable
(i.e., auxiliary variable or configuration/velocity variable pair), but
that setting an entry to a negative value will cause an exception to be
thrown when the integrator is initialized.
- [Nikravesh 1988] P. Nikravesh. Computer-Aided Analysis of Mechanical
Systems. Prentice Hall, 1988. Sec. 6.3.
- [Sherman 2011] M. Sherman, et al. Procedia IUTAM 2:241-261 (2011),
Section 3.3.
http://dx.doi.org/10.1016/j.piutam.2011.04.023
@sa CalcStateChangeNorm()
*/
/**
Returns the weighting vector (equivalent to a diagonal matrix) applied to
errors in both the generalized coordinate and velocity state variables, as
described in the group documentation. Only used by integrators that support
error estimation.
*/
const Eigen::VectorXd& get_generalized_state_weight_vector() const {
  return qbar_weight_;
}
/**
Returns a mutable weighting vector (equivalent to a diagonal matrix) applied
to errors in both the generalized coordinate and velocity state variables,
as described in the group documentation. Only used by integrators that
support error estimation. A VectorBlock is returned so that the values can
be mutated without permitting the vector to be resized. The integrator must
be re-initialized after calling this method; if Initialize() is not called
afterward, an exception will be thrown when attempting to call
IntegrateNoFurtherThanTime(). If the caller sets one of the entries to a
negative value, an exception will be thrown when the integrator is
initialized.
*/
Eigen::VectorBlock<Eigen::VectorXd>
get_mutable_generalized_state_weight_vector() {
  // Exposing mutable weights invalidates any prior initialization.
  initialization_done_ = false;
  return qbar_weight_.head(qbar_weight_.size());
}
/**
Returns the weighting vector (equivalent to a diagonal matrix) applied to
errors in the miscellaneous continuous state variables `z`. Only used by
integrators that support error estimation.
*/
const Eigen::VectorXd& get_misc_state_weight_vector() const {
  return z_weight_;
}
/**
Returns a mutable weighting vector (equivalent to a diagonal matrix) applied
to errors in the miscellaneous continuous state variables `z`. Only used by
integrators that support error estimation. A VectorBlock is returned so that
the values can be mutated without permitting the vector to be resized. The
integrator must be re-initialized after calling this method; if Initialize()
is not called afterward, an exception will be thrown when attempting to call
IntegrateNoFurtherThanTime(). If the caller sets one of the entries to a
negative value, an exception will be thrown when the integrator is
initialized.
*/
Eigen::VectorBlock<Eigen::VectorXd> get_mutable_misc_state_weight_vector() {
  // Exposing mutable weights invalidates any prior initialization.
  initialization_done_ = false;
  return z_weight_.head(z_weight_.size());
}
// @}
/**
@anchor integrator-initial-step-size
@name Methods related to initial step size
From [Watts 1983], "One of the more critical issues in solving ordinary
differential equations by a step-by-step process occurs in the starting
phase. Somehow the procedure must be supplied with an initial step size that
is on scale for the problem at hand. It must be small enough to yield a
reliable solution by the process, but it should not be so small as to
significantly affect the efficiency of solution. The more important of these
two possibilities is obviously the reliability question. The first step taken
by the code must reflect how fast the solution changes near the initial
point. For general purpose computing, an automatic step size adjustment
procedure for choosing subsequent steps is essential to produce an accurate
solution efficiently. This step size control is usually based on estimates of
the local errors incurred by the numerical method. Because most codes also
employ algorithmic devices which restrict the step size control to be
moderately varying (for reliability), subsequent steps usually tend to stay
on scale of the problem. This is not always so, as sometimes happens when
working with crude tolerances on problems having rapidly varying components.
Nevertheless, most step size adjustment procedures deal reasonably well with
all but the most abrupt changes, leaving the most serious danger confined to
the starting step size."
Users may not have a good idea of an initial step size to take, so
integration codes usually attempt to automatically select an initial step
size. Sophisticated algorithms for initial step size selection are described
in [Hindmarsh 1980], [Watts 1983], [Gladwell 1987], and [Hairer 2008]. These
algorithms can fail to produce a good initial step size as well (see
discussion in [Watts 1983]). Drake's integrators use a fraction (generally
1/10th of the maximum step size) to set the initial step size. If you have a
problem that operates at wildly varying time scales, e.g., Robertson's
canonical stiff system problem (that requires a large maximum step size to
be efficient), consider setting both the initial and maximum step sizes
(i.e., not using the defaults) to keep from missing phenomena that occur over
small time scales near the beginning of the time interval being integrated.
- [Gladwell 1987] I. Gladwell, L. F. Shampine, and R. W. Brankin. Automatic
selection of the initial step size for an ODE solver. J.
Comp. Appl. Math., Vol. 18, pp. 175-192, 1987.
- [Hairer 2008] E. Hairer, S. P. Norsett, and G. Wanner. Solving Ordinary
Differential Equations I (Nonstiff Problems), 2nd ed.
Springer, 2008.
- [Hindmarsh 1980] A. C. Hindmarsh. LSODE and LSODI, two new initial value
ordinary differential equation solvers. ACM SIGNUM
Newsletter 15, 4, 1980.
- [Robertson 1966] H.H. Robertson. The solution of a set of reaction rate
equations, pp. 178–182. Academic Press, 1966.
- [Watts 1983] H. A. Watts. Starting stepsize for an ODE solver. J. Comp.
Appl. Math., Vol. 8, pp. 177-191, 1983.
@{
*/
/**
Request that the first attempted integration step have a particular size.
If no request is made, the integrator will estimate a suitable size
for the initial step attempt. *If the integrator does not support error
control*, this method will throw a std::exception (call
supports_error_estimation() to verify before calling this method). For
variable-step integration, the initial target will be treated as a maximum
step size subject to accuracy requirements and event occurrences. You can
find out what size *actually* worked with
`get_actual_initial_step_size_taken()`.
@param step_size the requested size of the first attempted step.
@throws std::exception If the integrator does not support error
estimation.
*/
void request_initial_step_size_target(const T& step_size) {
  // Note: the previous revision imported std::isnan here but never used it;
  // the unused using-declaration has been removed.
  if (!supports_error_estimation()) {
    throw std::logic_error(
        "Integrator does not support error estimation and "
        "user has initial step size target");
  }
  req_initial_step_size_ = step_size;
}
/**
Returns the target size of the first integration step. Use
`get_actual_initial_step_size_taken()` to find out what step size was
*actually* used for the first integration step.
@see request_initial_step_size_target()
*/
const T& get_initial_step_size_target() const { return req_initial_step_size_; }
// @}
/**
@anchor integrator-maxstep
@name Methods related to maximum integration step size
Sets the _nominal_ maximum step size- the actual maximum step size taken
may be slightly larger (see set_maximum_step_size() and
get_stretch_factor())- that an integrator will take. Each integrator has a
default maximum step size, which might be infinite.
@{
*/
/**
Sets the maximum step size that may be taken by this integrator. This setting
should be used if you know the maximum time scale of your problem. The
integrator may stretch the maximum step size by as much as 1% to reach a
discrete event. For fixed step integrators, all steps will be taken at the
maximum step size *unless* an event would be missed.
@warning See @ref integrator-initial-step-size "Initial step size selection"
*/
// TODO(edrumwri): Update this comment when stretch size is configurable.
void set_maximum_step_size(const T& max_step_size) {
DRAKE_ASSERT(max_step_size >= 0.0);  // A negative step size is nonsensical.
max_step_size_ = max_step_size;
}
/**
Returns the maximum step size that may be taken by this integrator. This is
a soft maximum: the integrator may stretch it by as much as 1% to hit a
discrete event.
@sa set_requested_minimum_step_size()
*/
// TODO(edrumwri): Update this comment when stretch size is configurable.
const T& get_maximum_step_size() const {
  return max_step_size_;
}
/**
Returns the stretch factor (> 1) that is multiplied by the maximum
(typically user-designated) integration step size to obtain the amount
that the integrator is able to stretch the maximum time step toward
hitting an upcoming publish or update event in
IntegrateNoFurtherThanTime().
@sa IntegrateNoFurtherThanTime()
*/
double get_stretch_factor() const {
  // A stretch of up to 1% past the maximum step is permitted.
  return 1.01;
}
// @}
/**
@anchor integrator-minstep
@name Methods related to minimum integration step size selection and behavior
Variable step integrators reduce their step sizes as needed to achieve
requirements such as specified accuracy or step convergence. However, it is
not possible to take an arbitrarily small step. Normally integrators choose
an appropriate minimum step and throw an exception if the requirements can't
be achieved without going below that. Methods in this section allow you to
influence two aspects of this procedure:
- you can increase the minimum step size, and
- you can control whether an exception is thrown if a smaller step would have
been needed to achieve the aforementioned integrator requirements.
By default, integrators allow a very small minimum step which can result in
long run times. Setting a larger minimum can be helpful as a diagnostic to
figure out what aspect of your simulation is requiring small steps. You can
set the minimum to what should be a "reasonable" minimum based on what you
know about the physical system. You will then get an std::runtime_error
exception thrown at any point in time where your model behaves unexpectedly
(due to, e.g., a discontinuity in the derivative evaluation function).
If you disable the exception (via
`set_throw_on_minimum_step_size_violation(false)`), the integrator will
simply proceed with a step of the minimum size: accuracy is guaranteed only
when the minimum step size is not violated. Beware that there can be no
guarantee about the magnitude of any errors introduced by violating the
accuracy "requirements" in this manner, so disabling the exception should be
done warily.
#### Details
Because time is maintained to finite precision, the integrator uses a scalar
`h_floor` to constrain time step h ≥ `h_floor` such that `current_time + h >
current_time` will be strictly satisfied. The integrator will never
automatically decrease its step below `h_floor`. We calculate `h_floor=max(ε,
ε⋅abs(t))`, where t is the current time and ε is a small multiple of machine
precision, typically a number like 1e-14. Note that `h_floor` necessarily
grows with time; if that is a concern you should limit how long your
simulations are allowed to run without resetting time.
You may request a larger minimum step size `h_min`. Then at every time t, the
integrator determines a "working" minimum `h_work=max(h_min, h_floor)`. If
the step size selection algorithm determines that a step smaller than
`h_work` is needed to meet accuracy or other needs, then a std::runtime_error
exception will be thrown and the simulation halted. On the other hand, if you
have suppressed the exception (again, via
`set_throw_on_minimum_step_size_violation(false)`), the integration will
continue, taking a step of size `h_work`.
Under some circumstances the integrator may legitimately take a step of size
`h` smaller than your specified `h_min`, although never smaller than
`h_floor`. For example, occasionally the integrator may reach an event or
time limit that occurs a very short time after the end of a previous step,
necessitating that a tiny "sliver" of a step be taken to complete the
interval. That does not indicate an error, and required accuracy and
convergence goals are achieved. Larger steps can resume immediately
afterwards. Another circumstance is when one of the integrator's stepping
methods is called directly requesting a very small step, for example
`IntegrateWithMultipleStepsToTime(h)`. No exception will be thrown in either
of these cases.
*/
//@{
/**
Specifies the minimum step size `h_min` this integrator is permitted to
take. Steps smaller than this are never taken except under the special
circumstances described @ref integrator-minstep "above", and the setting
is ignored whenever it falls below the absolute floor `h_floor` also
described above. The default is zero.
@param min_step_size a non-negative value. A value of zero directs the
                     integrator to choose a reasonable minimum on its own
                     (see get_working_minimum_step_size()).
@sa get_requested_minimum_step_size()
@sa get_working_minimum_step_size()
*/
void set_requested_minimum_step_size(const T& min_step_size) {
  DRAKE_ASSERT(min_step_size >= 0.0);
  req_min_step_size_ = min_step_size;
}
/**
Gets the requested minimum step size `h_min` for this integrator.
@sa set_requested_minimum_step_size()
@sa get_working_minimum_step_size()
*/
const T& get_requested_minimum_step_size() const {
  return req_min_step_size_; }
/**
Controls whether a std::exception is thrown when the integrator's step
size selection algorithm determines that it must take a step smaller than
the minimum step size (for, e.g., purposes of error control). The default
is `true`. When `false`, the integrator instead advances time and state
using the minimum specified step size in such situations. See
@ref integrator-minstep "this section" for more detail.
*/
void set_throw_on_minimum_step_size_violation(bool throws) {
  min_step_exceeded_throws_ = throws;
}
/**
Reports whether this integrator throws when its step size selection would
violate the minimum step size (the throw_on_minimum_step_size_violation
flag).
@sa set_throw_on_minimum_step_size_violation().
*/
bool get_throw_on_minimum_step_size_violation() const {
  return min_step_exceeded_throws_;
}
/**
Returns the working minimum step size `h_work(t)` currently in effect for
this integrator; it varies with the current time t stored in the
integrator's context. See @ref integrator-minstep "this section" for more
detail.
*/
T get_working_minimum_step_size() const {
  using std::abs;
  using std::max;
  // A small constant close to machine epsilon, applied both absolutely and
  // relative to the magnitude of the current time.
  const double kTol = 1e-14;
  const T absolute_floor = max(kTol, abs(get_context().get_time()) * kTol);
  // The user-requested minimum takes over once it exceeds the floor.
  return max(absolute_floor, req_min_step_size_);
}
// @}
/**
Resets the integrator to initial values, i.e., default construction
values. Note that the Context pointer is not modified here; only the
integrator's internal state, settings, and statistics are cleared, so
Initialize() must be called again before further use.
*/
void Reset() {
  // Kill the error estimate and weighting matrices.
  err_est_.reset();
  qbar_weight_.setZero(0);
  z_weight_.setZero(0);
  pinvN_dq_change_.reset();
  unweighted_substate_change_.setZero(0);
  weighted_q_change_.reset();
  // Drops dense output, if any.
  dense_output_.reset();
  // Integrator no longer operates in fixed step mode.
  fixed_step_mode_ = false;
  // Statistics no longer valid.
  ResetStatistics();
  // Wipe out settings.
  req_min_step_size_ = 0;
  max_step_size_ = nan();
  accuracy_in_use_ = nan();
  // Indicate values used for error controlled integration no longer valid.
  prev_step_size_ = nan();
  ideal_next_step_size_ = nan();
  // Call the derived integrator reset routine.
  DoReset();
  // Indicate that initialization is necessary.
  initialization_done_ = false;
}
/**
An integrator must be initialized before being used. The pointer to the
context must be set before Initialize() is called (or an std::exception
will be thrown). If Initialize() is not called, an exception will be
thrown when attempting to call IntegrateNoFurtherThanTime(). To reinitialize
the integrator, Reset() should be called followed by Initialize().
@throws std::exception If the context has not been set or a user-set
        parameter has been set illogically (i.e., one of the
        weighting matrix coefficients is set to a negative value- this
        check is only performed for integrators that support error
        estimation; the maximum step size is smaller than the minimum
        step size; the requested initial step size is outside of the
        interval [minimum step size, maximum step size]).
@sa Reset()
*/
void Initialize() {
  if (!context_) throw std::logic_error("Context has not been set.");
  // Verify that user settings are reasonable. These comparisons are only
  // meaningful for scalar types that yield boolean comparison results
  // (e.g., not for symbolic scalars).
  if constexpr (scalar_predicate<T>::is_bool) {
    if (max_step_size_ < req_min_step_size_) {
      throw std::logic_error("Integrator maximum step size is less than the "
                             "minimum step size");
    }
    if (req_initial_step_size_ > max_step_size_) {
      throw std::logic_error("Requested integrator initial step size is "
                             "larger than the maximum step size.");
    }
    if (req_initial_step_size_ < req_min_step_size_) {
      throw std::logic_error("Requested integrator initial step size is "
                             "smaller than the minimum step size.");
    }
  }
  // TODO(edrumwri): Compute qbar_weight_, z_weight_ automatically.
  // Set error weighting vectors if not already done.
  if (supports_error_estimation()) {
    // Allocate space for the error estimate.
    err_est_ = system_.AllocateTimeDerivatives();
    const auto& xc = context_->get_state().get_continuous_state();
    const int gv_size = xc.get_generalized_velocity().size();
    const int misc_size = xc.get_misc_continuous_state().size();
    // Default the weights to ones, but preserve any weights already sized
    // correctly (e.g., set by the user before Initialize()).
    if (qbar_weight_.size() != gv_size) qbar_weight_.setOnes(gv_size);
    if (z_weight_.size() != misc_size) z_weight_.setOnes(misc_size);
    // Verify that minimum values of the weighting matrices are non-negative.
    if ((qbar_weight_.size() && qbar_weight_.minCoeff() < 0) ||
        (z_weight_.size() && z_weight_.minCoeff() < 0))
      throw std::logic_error("Scaling coefficient is less than zero.");
  }
  // Statistics no longer valid.
  ResetStatistics();
  // Call the derived integrator initialization routine (if any)
  DoInitialize();
  initialization_done_ = true;
}
/**
(Internal use only) Integrates the system forward in time by a single step
with step size subject to integration error tolerances (assuming that the
integrator supports error estimation). The integrator must already have
been initialized or an exception will be thrown. The context will be
integrated to a time that will never exceed the minimum of
`publish_time`, `update_time`, and the current time plus
`1.01 * get_maximum_step_size()`.
@param publish_time The present or future time (exception will be thrown
       if this is not the case) at which the next publish will occur.
@param update_time The present or future time (exception will be thrown
       if this is not the case) at which the next update will occur.
@param boundary_time The present or future time (exception will be thrown
       if this is not the case) marking the end of the user-designated
       simulated interval.
@throws std::exception If the integrator has not been initialized or one
                       of publish_time, update_time, or boundary_time is
                       in the past.
@return The reason for the integration step ending.
@post The time in the context will be no greater than
      `min(publish_time, update_time, boundary_time)`.
@warning Users should generally not call this function directly; within
         simulation circumstances, users will typically call
         `Simulator::AdvanceTo()`. In other circumstances, users will
         typically call
         `IntegratorBase::IntegrateWithMultipleStepsToTime()`.
This method at a glance:
- For integrating ODEs/DAEs via Simulator
- Supports fixed step and variable step integration schemes
- Takes only a single step forward.
*/
// The 1.01 factor above is a small "stretch" allowance past the nominal
// maximum step size.
// TODO(edrumwri): Make the stretch size configurable.
StepResult IntegrateNoFurtherThanTime(
    const T& publish_time, const T& update_time, const T& boundary_time);
/**
Stepping function for integrators operating outside of Simulator that
advances the continuous state exactly to `t_final`. This method is
designed for integrator users that do not wish to consider publishing or
discontinuous, mid-interval updates. This method will step the integrator
multiple times, as necessary, to attain requested error tolerances and
to ensure the integrator converges.
@warning Users should simulate systems using `Simulator::AdvanceTo()` in
         place of this function (which was created for off-simulation
         purposes), generally.
@param t_final The current or future time to integrate to.
@throws std::exception If the integrator has not been initialized or
                       t_final is in the past.
@sa IntegrateNoFurtherThanTime(), which is designed to be operated by
    Simulator and accounts for publishing and state reinitialization.
@sa IntegrateWithSingleFixedStepToTime(), which is also designed to be
    operated *outside of* Simulator, but throws an exception if the
    integrator cannot advance time to `t_final` in a single step.
This method at a glance:
- For integrating ODEs/DAEs not using Simulator
- Supports fixed step and variable step integration schemes
- Takes as many steps as necessary until time has advanced to `t_final`
*/
void IntegrateWithMultipleStepsToTime(const T& t_final) {
  // Note: `using std::max;` was removed; only std::min is needed here.
  using std::min;
  const Context<T>& context = get_context();
  const T inf = std::numeric_limits<double>::infinity();
  do {
    // Publish and update times are infinite because this API ignores
    // publishing and discrete updates; each step is bounded only by
    // t_final and the integrator's maximum step size.
    IntegrateNoFurtherThanTime(inf, inf,
        min(t_final, context.get_time() + get_maximum_step_size()));
  } while (context.get_time() < t_final);
}
/**
Stepping function for integrators operating outside of Simulator that
advances the continuous state *using a single step* to `t_target`.
This method is designed for integrator users that do not wish to
consider publishing or discontinuous, mid-interval updates. One such
example application is that of direct transcription for trajectory
optimization, for which the integration process should be _consistent_: it
should execute the same sequence of arithmetic operations for all values
of the nonlinear programming variables. In keeping with the naming
semantics of this function, error controlled integration is not supported
(though error estimates will be computed for integrators that support that
feature), which is a minimal requirement for "consistency".
@warning Users should simulate systems using `Simulator::AdvanceTo()` in
         place of this function (which was created for off-simulation
         purposes), generally.
@param t_target The current or future time to integrate to.
@throws std::exception If the integrator has not been initialized or
                       `t_target` is in the past or the integrator
                       is not operating in fixed step mode.
@sa IntegrateNoFurtherThanTime(), which is designed to be operated by
    Simulator and accounts for publishing and state reinitialization.
@sa IntegrateWithMultipleStepsToTime(), which is also designed to be
    operated *outside of* Simulator, but will take as many integration
    steps as necessary until time has been stepped forward to `t_target`.
@returns `true` if the integrator was able to take a single fixed step to
         `t_target`.
This method at a glance:
- For integrating ODEs/DAEs not using Simulator
- Fixed step integration (no step size reductions for error control or
  integrator convergence)
- Takes only a single step forward.
*/
[[nodiscard]] bool IntegrateWithSingleFixedStepToTime(const T& t_target) {
  using std::max;
  using std::abs;
  const T h = t_target - context_->get_time();
  // The negative-step check only applies to scalar types that yield
  // boolean comparison results (e.g., not symbolic scalars).
  if (scalar_predicate<T>::is_bool && h < 0) {
    throw std::logic_error("IntegrateWithSingleFixedStepToTime() called with "
                           "a negative step size.");
  }
  if (!this->get_fixed_step_mode())
    throw std::logic_error("IntegrateWithSingleFixedStepToTime() requires "
                           "fixed stepping.");
  // Attempt the single step; a `false` return indicates, e.g., an
  // integrator convergence failure at this step size.
  if (!Step(h))
    return false;
  UpdateStepStatistics(h);
  if constexpr (scalar_predicate<T>::is_bool) {
    // Correct any round-off error that has occurred. Formula below requires
    // that time be non-negative.
    DRAKE_DEMAND(context_->get_time() >= 0);
    const double tol = 10 * std::numeric_limits<double>::epsilon() *
        ExtractDoubleOrThrow(max(1.0, max(t_target, context_->get_time())));
    DRAKE_DEMAND(abs(context_->get_time() - t_target) < tol);
  }
  // Snap the context time exactly to t_target, eliminating accumulated
  // round-off from the step above.
  context_->SetTime(t_target);
  return true;
}
/**
@name Integrator statistics methods
@{
These methods allow the caller to manipulate and query integrator
statistics. Generally speaking, the larger the integration step taken, the
faster a simulation will run. These methods allow querying (and resetting)
the integrator statistics as one means of determining how to make
a simulation run faster.
*/
/**
Resets all accumulated statistics to the values they hold immediately
after construction or after `Initialize()`.
*/
void ResetStatistics() {
  // Counters restart at zero.
  num_steps_taken_ = 0;
  num_ode_evals_ = 0;
  num_shrinkages_from_error_control_ = 0;
  num_shrinkages_from_substep_failures_ = 0;
  num_substep_failures_ = 0;
  // Step-size statistics are NaN until at least one step has been taken.
  actual_initial_step_size_taken_ = nan();
  smallest_adapted_step_size_taken_ = nan();
  largest_step_size_taken_ = nan();
  // Give the derived integrator a chance to reset its own statistics.
  DoResetStatistics();
}
/**
Returns the count of failed sub-steps, i.e., sub-steps whose nonlinear
system solve required one or more step size reductions.
*/
int64_t get_num_substep_failures() const { return num_substep_failures_; }
/**
Returns how many times the step size was shrunk because of sub-step
failures (e.g., integrator convergence failures) since the last call to
ResetStatistics() or Initialize().
*/
int64_t get_num_step_shrinkages_from_substep_failures() const {
  return num_shrinkages_from_substep_failures_;
}
/// Returns how many times the step size was shrunk for failing to meet the
/// targeted error tolerances, since the last ResetStatistics or Initialize().
int64_t get_num_step_shrinkages_from_error_control() const {
  return num_shrinkages_from_error_control_;
}
/**
Returns the number of ODE function evaluations (calls to
CalcTimeDerivatives()) since the last call to ResetStatistics() or
Initialize(). This count includes *all* such calls including (1)
those necessary to compute Jacobian matrices; (2) those used in rejected
integrated steps (for, e.g., purposes of error control); (3) those used
strictly for integrator error estimation; and (4) calls that exhibit little
cost (due to results being cached).
*/
int64_t get_num_derivative_evaluations() const {
  return num_ode_evals_;
}
/** Returns the actual size of the first successful step. */
const T& get_actual_initial_step_size_taken() const {
  return actual_initial_step_size_taken_;
}
/**
Returns the size of the smallest step taken *as the result of a controlled
integration step adjustment* since the last Initialize() or
ResetStatistics() call. The value is NaN for integrators without error
estimation.
*/
const T& get_smallest_adapted_step_size_taken() const {
  return smallest_adapted_step_size_taken_;
}
/**
Returns the size of the largest step taken since the last Initialize() or
ResetStatistics() call.
*/
const T& get_largest_step_size_taken() const {
  return largest_step_size_taken_;
}
/**
Returns how many integration steps have been taken since the last
Initialize() or ResetStatistics() call.
*/
int64_t get_num_steps_taken() const { return num_steps_taken_; }
/** Manually adds @p evals to the statistic counting ODE evaluations.
@warning Implementations should generally avoid calling this method;
         evaluating the ODEs using EvalTimeDerivatives() updates this
         statistic automatically and intelligently (by leveraging the
         caching system to avoid incrementing the count when cached
         evaluations are used). */
void add_derivative_evaluations(double evals) {
  num_ode_evals_ += evals;
}
// @}
/**
Returns a const reference to the internally-maintained Context holding the
most recent state in the trajectory, suitable for publishing or for
extracting information about this trajectory step.
*/
const Context<T>& get_context() const { return *context_; }
/**
Returns a mutable pointer to the internally-maintained Context holding the
most recent state in the trajectory.
*/
Context<T>* get_mutable_context() {
  return context_;
}
/**
Replaces the internally-maintained Context pointer with a different one.
This is useful for supplying a new set of initial conditions or for wiping
out the current context (by passing in a null pointer). Unless the new
context is null, Initialize() should be invoked after replacing it.
@param context The pointer to the new context, or nullptr to wipe out the
               current context without replacing it with another.
*/
void reset_context(Context<T>* context) {
  context_ = context;
  // Any prior initialization is invalid for the new context.
  initialization_done_ = false;
}
/**
@name Methods for dense output computation
@anchor dense_output_computation
@{
In general, dense output computations entail both CPU load and memory
footprint increases during numerical integration. For some applications,
the performance penalty may be prohibitive. As such, these computations
are only carried out by explicit user request. The API to start and stop
a _dense integration_ process (i.e. a numerical integration process that
also computes dense output) is consistent with this design choice.
Once dense integration is started, and until it is stopped, all subsequent
integration steps taken will update the allocated dense output.
*/
/**
Starts dense integration, allocating a new dense output for this integrator
to use.
@pre The integrator has been initialized.
@pre The system being integrated has continuous state.
@pre No dense integration is in progress (no dense output is held by the
     integrator)
@throws std::exception if any of the preconditions is not met.
@warning Dense integration may incur significant overhead.
*/
void StartDenseIntegration() {
  if (!is_initialized()) {
    throw std::logic_error("Integrator was not initialized.");
  }
  if (get_context().num_continuous_states() == 0) {
    throw std::logic_error(
        "System has no continuous state, no dense output can be built.");
  }
  if (get_dense_output() != nullptr) {
    throw std::logic_error("Dense integration has been started already.");
  }
  // Fresh, empty trajectory; DoDenseStep() appends to it as steps are taken.
  dense_output_ = std::make_unique<trajectories::PiecewisePolynomial<T>>();
}
/**
Returns a const pointer to the integrator's current PiecewisePolynomial
instance, holding a representation of the continuous state trajectory since
the last StartDenseIntegration() call. This is suitable to query the
integrator's current dense output, if any (may be nullptr).
*/
const trajectories::PiecewisePolynomial<T>* get_dense_output() const {
  return dense_output_.get();
}
/**
Stops dense integration, yielding ownership of the current dense output
to the caller.
@remarks This process is irreversible.
@returns A PiecewisePolynomial instance, i.e. a representation of the
         continuous state trajectory of the system being integrated
         that can be evaluated at any time within its extension. This
         representation is defined starting at the context time of the
         last StartDenseIntegration() call and finishing at the current
         context time.
@pre Dense integration is in progress (a dense output is held by this
     integrator, after a call to StartDenseIntegration()).
@post Previously held dense output is not updated nor referenced by
      the integrator anymore.
@throws std::exception if any of the preconditions is not met.
*/
std::unique_ptr<trajectories::PiecewisePolynomial<T>> StopDenseIntegration() {
  if (dense_output_ == nullptr) {
    throw std::logic_error("No dense integration has been started.");
  }
  // Transfer ownership to the caller; dense_output_ becomes null.
  return std::move(dense_output_);
}
// @}
/**
Returns a constant reference to the system being integrated (the one
supplied to the integrator's constructor).
*/
const System<T>& get_system() const {
  return system_;
}
/// Reports whether Initialize() has been (successfully) called.
bool is_initialized() const {
  return initialization_done_;
}
/**
Returns the size of the last (previous) integration step, or NaN if no
integration steps have been taken yet.
*/
const T& get_previous_integration_step_size() const {
  return prev_step_size_;
}
protected:
/**
Resets any statistics particular to a specific integrator. This default
implementation does nothing; integrators that collect their own statistics
should override this method and reset them there.
*/
virtual void DoResetStatistics() {}
/**
Evaluates the derivative function and updates call statistics.
Subclasses should call this function rather than calling
system.EvalTimeDerivatives() directly.
*/
const ContinuousState<T>& EvalTimeDerivatives(const Context<T>& context) {
  // Delegate to the templated overload below, using this integrator's
  // own system.
  return EvalTimeDerivatives(get_system(), context);
}
/**
Evaluates the derivative function (and updates call statistics).
Subclasses should call this function rather than calling
system.EvalTimeDerivatives() directly. This overload exists so that
integrators can include AutoDiff'd systems in derivative function
evaluations.
*/
template <typename U>
const ContinuousState<U>& EvalTimeDerivatives(const System<U>& system,
                                              const Context<U>& context) {
  const CacheEntry& cache_entry = system.get_time_derivatives_cache_entry();
  const CacheEntryValue& cache_value =
      cache_entry.get_cache_entry_value(context);
  const int64_t serial_before = cache_value.serial_number();
  const ContinuousState<U>& derivatives = system.EvalTimeDerivatives(context);
  // A changed serial number means the cache entry was recomputed, i.e., a
  // real derivative evaluation occurred rather than a cache hit.
  if (cache_value.serial_number() != serial_before) {
    ++num_ode_evals_;
  }
  return derivatives;
}
/**
Sets the working ("in use") accuracy for this integrator. The working
accuracy may differ from the target accuracy when the latter is too loose
or too tight for an integrator's capabilities.
@sa get_accuracy_in_use()
@sa get_target_accuracy()
*/
void set_accuracy_in_use(double accuracy) {
  accuracy_in_use_ = accuracy;
}
/**
Default code for advancing the continuous state of the system by a single
step of @p h_max (or smaller, depending on error control). This particular
function is designed to be called directly by an error estimating
integrator's DoStep() method to effect error-controlled integration.
The integrator can effect error controlled integration without calling this
method, if the implementer so chooses, but this default method is expected
to function well in most circumstances.
@param[in] h_max The maximum step size to be taken. The integrator may
           take a smaller step than specified to satisfy accuracy
           requirements, to resolve integrator convergence problems, or
           to respect the integrator's maximum step size.
@throws std::exception if integrator does not support error
        estimation.
@note This function will shrink the integration step as necessary whenever
      the integrator's DoStep() fails to take the requested step
      e.g., due to integrator convergence failure.
@returns `true` if the full step of size @p h_max is taken and `false`
         otherwise (i.e., a smaller step than @p h_max was taken).
@sa DoStep()
*/
bool StepOnceErrorControlledAtMost(const T& h_max);
/**
Computes the infinity norm of a change in continuous state. We use the
infinity norm to capture the idea that, by providing accuracy requirements,
the user can indirectly specify error tolerances that act to limit the
largest error in any state vector component.
@returns the norm (a non-negative value)
@sa CalcAdjustedStepSize()
*/
T CalcStateChangeNorm(const ContinuousState<T>& dx_state) const;
/**
Calculates adjusted integrator step sizes toward keeping state variables
within error bounds on the next integration step. Note that it is not
guaranteed that the (possibly) reduced step size will keep state variables
within error bounds; however, the process of (1) taking a trial
integration step, (2) calculating the error, and (3) adjusting the step
size can be repeated until convergence.
@param err
   The norm of the integrator error that was computed using
   @p attempted_step_size.
@param attempted_step_size
   The step size that was attempted.
@param[in,out] at_minimum_step_size
   If `true` on entry, the error control mechanism is not allowed to
   shrink the step because the integrator is stepping at the minimum
   step size (note that this condition will only occur if
   `get_throw_on_minimum_step_size_violation() == false`- an exception
   would be thrown otherwise). If `true` on entry and `false` on exit,
   the error control mechanism has managed to increase the step size
   above the working minimum; if `true` on entry and `true` on exit,
   error control would like to shrink the step size but cannot. If
   `false` on entry and `true` on exit, error control shrank the step
   to the working minimum step size.
@returns a pair of types bool and T; the bool will be set to `true` if
   the integration step was to be considered successful and `false`
   otherwise. The value of the T type will be set to the recommended next
   step size.
@sa CalcStateChangeNorm()
*/
std::pair<bool, T> CalcAdjustedStepSize(
    const T& err,
    const T& attempted_step_size,
    bool* at_minimum_step_size) const;
/**
Derived classes can override this method to perform special
initialization; it is invoked from Initialize(). This default
implementation does nothing.
*/
virtual void DoInitialize() {}
/**
Derived classes can override this method to perform routines when Reset()
is called. This default implementation does nothing.
*/
virtual void DoReset() {}
/**
Returns a mutable pointer to the internally-maintained PiecewisePolynomial
instance, holding a representation of the continuous state trajectory since
the last time StartDenseIntegration() was called. This is useful for
derived classes to update the integrator's current dense output, if any
(may be nullptr).
*/
trajectories::PiecewisePolynomial<T>* get_mutable_dense_output() {
  return dense_output_.get();
}
/**
Derived classes must implement this method to (1) integrate the continuous
portion of this system forward by a single step of size @p h and
(2) set the error estimate (via get_mutable_error_estimate()). This
method is called during the integration process (via
StepOnceErrorControlledAtMost(), IntegrateNoFurtherThanTime(), and
IntegrateWithSingleFixedStepToTime()).
@param h The integration step to take.
@returns `true` if successful, `false` if the integrator was unable to take
         a single step of size @p h (due to, e.g., an integrator
         convergence failure).
@post If the time on entry is denoted `t`, the time and state will be
      advanced to `t+h` if the method returns `true`; otherwise, the
      time and state should be reset to those at `t`.
@warning It is expected that DoStep() will return `true` for some, albeit
         possibly very small, positive value of @p h. The derived
         integrator's stepping algorithm can make this guarantee, for
         example, by switching to an algorithm not subject to convergence
         failures (e.g., explicit Euler) for very small step sizes.
@sa Step(), which dispatches to this method (or to DoDenseStep()).
*/
virtual bool DoStep(const T& h) = 0;
// TODO(russt): Allow subclasses to override the interpolation scheme used, as
// the 'optimal' dense output scheme is only known by the specific integration
// scheme being implemented.
/**
Calls DoStep(h) while recording the resulting step in the dense output. If
the current dense output is already non-empty, then the time in the current
context must match either the final segment time of the dense output, or the
penultimate segment time (to support the case where the same integration step
is attempted multiple times, which occurs e.g. in witness function
isolation).
@param h The integration step to take.
@returns `true` if successful, `false` if either the integrator was unable to
         take a single step of size @p h or to advance its dense output an
         equal step.
@sa DoStep()
*/
bool DoDenseStep(const T& h) {
  // NOTE(review): assumes dense_output_ is non-null; Step() only dispatches
  // here when get_dense_output() is non-null.
  const ContinuousState<T>& state = context_->get_continuous_state();
  // Note: It is tempting to avoid this initial call to EvalTimeDerivatives,
  // and just use AppendCubicHermiteSegment below. But this version is robust
  // to e.g. UnrestrictedUpdates or any other changes that could occur between
  // calls to DoDenseStep(). And we hope that the caching in
  // EvalTimeDerivatives() avoids any cost for the easy case.
  const T start_time = context_->get_time();
  VectorX<T> start_state, start_derivatives;
  start_state = state.CopyToVector();
  start_derivatives = EvalTimeDerivatives(*context_).CopyToVector();
  // Performs the integration step.
  if (!DoStep(h)) return false;
  // Allow this update to *replace* the final segment if the start_time of
  // this step is earlier than the current end_time of the dense output and
  // matches the start_time of the final segment of the dense output.
  // This happens, for instance, when the Simulator is doing WitnessFunction
  // isolation; it routinely backs up the integration and tries the same step
  // multiple times. Note: we intentionally check for equality between
  // double values here.
  if (dense_output_->get_segment_times().size() > 1 &&
      start_time < dense_output_->end_time() &&
      start_time == dense_output_->get_segment_times().end()[-2]) {
    dense_output_->RemoveFinalSegment();
  }
  // Append a cubic Hermite segment spanning [start_time, current time],
  // matching both state values and derivatives at the endpoints.
  const ContinuousState<T>& derivatives = EvalTimeDerivatives(*context_);
  dense_output_->ConcatenateInTime(
      trajectories::PiecewisePolynomial<T>::CubicHermite(
          std::vector<T>({start_time, context_->get_time()}),
          {start_state, state.CopyToVector()},
          {start_derivatives, derivatives.CopyToVector()}));
  return true;
}
/**
 * Returns an error estimate of the state variables recorded by the last
 * call to StepOnceFixedSize(), or nullptr if the integrator does not
 * support error estimation.
 */
ContinuousState<T>* get_mutable_error_estimate() {
  return err_est_.get();
}
// Records the size of the first successful step.
void set_actual_initial_step_size_taken(const T& h) {
  actual_initial_step_size_taken_ = h;
}
/**
 * Records the smallest-step-taken statistic resulting from a controlled
 * integration step adjustment.
 */
void set_smallest_adapted_step_size_taken(const T& h) {
  smallest_adapted_step_size_taken_ = h;
}
// Records the largest-step-size-taken statistic.
void set_largest_step_size_taken(const T& h) {
  largest_step_size_taken_ = h;
}
// Records the "ideal" next step size (typically computed by error control).
void set_ideal_next_step_size(const T& h) {
  ideal_next_step_size_ = h;
}
private:
// Validates that a smaller step size does not fall below the working minimum
// and throws an exception if desired.
// @param current_step_size the step size currently in use.
// @param new_step_size the proposed (smaller) step size.
// @throws std::runtime_error if the new step size is a downward adjustment
//         below the working minimum and throwing is enabled.
void ValidateSmallerStepSize(const T& current_step_size,
                             const T& new_step_size) const {
  if (new_step_size < get_working_minimum_step_size() &&
      new_step_size < current_step_size &&  // Verify step adjusted downward.
      min_step_exceeded_throws_) {
    // Fix: the format string previously lacked the second `{}` placeholder,
    // so the working minimum value was dropped from the debug message.
    DRAKE_LOGGER_DEBUG("Integrator wants to select too small step "
        "size of {}; working minimum is {}", new_step_size,
        get_working_minimum_step_size());
    std::ostringstream str;
    // TODO(russt): Link to the "debugging dynamical systems" tutorial
    // (#17249) once it exists.
    str << "Error control wants to select step smaller than minimum"
        << " allowed (" << get_working_minimum_step_size()
        << "). This is typically an indication that some part of your system "
           "*with continuous state* is going unstable and/or is producing "
           "excessively large derivatives.";
    throw std::runtime_error(str.str());
  }
}
// Updates the integrator statistics, accounting for a step just taken of
// size h.
void UpdateStepStatistics(const T& h) {
  ++num_steps_taken_;
  if (num_steps_taken_ == 1) {
    // The very first step initializes both step-size statistics.
    set_actual_initial_step_size_taken(h);
    set_largest_step_size_taken(h);
  } else if (h > get_largest_step_size_taken()) {
    set_largest_step_size_taken(h);
  }
  // Remember this step as the most recent one.
  prev_step_size_ = h;
}
// Steps the system forward exactly by @p h, if possible, by calling DoStep
// or DoDenseStep depending on whether dense integration was started or not.
// Does necessary pre-initialization and post-cleanup. This method does not
// update general integrator statistics (which are updated in the calling
// methods), because error control might decide that it does not like the
// result of the step and might "rewind" and take a smaller one.
// @returns `true` if successful, `false` otherwise (due to, e.g., integrator
//          convergence failure).
// @note The working minimum step size does not apply here- see
// @ref integrator-minstep "this section" for details.
// @sa DoStep()
// @sa DoDenseStep()
bool Step(const T& h) {
  return (get_dense_output() != nullptr) ? DoDenseStep(h) : DoStep(h);
}
// Reference to the system being simulated.
const System<T>& system_;
// Pointer to the context; not owned by this integrator.
Context<T>* context_{nullptr};  // The trajectory Context.
// Current dense output; null unless StartDenseIntegration() is active.
std::unique_ptr<trajectories::PiecewisePolynomial<T>> dense_output_{nullptr};
// Runtime variables.
// For variable step integrators, this is set at the end of each step to guide
// the next one.
T ideal_next_step_size_{nan()};  // Indicates that the value is uninitialized.
// The scaling factor to apply to an integration step size when an integrator
// convergence failure occurs (to make convergence more likely on the next
// attempt).
// TODO(edrumwri): Allow subdivision factor to be user-tweakable.
const double subdivision_factor_{0.5};
// The accuracy being used; NaN until set.
double accuracy_in_use_{nan()};
// The maximum step size; NaN until set.
T max_step_size_{nan()};
// The requested minimum step size; zero means "use the working floor".
T req_min_step_size_{0};
// The last step taken by the integrator; NaN until a step is taken.
T prev_step_size_{nan()};
// Whether error-controlled integrator is running in fixed step mode. Value
// is irrelevant for integrators without error estimation capabilities.
bool fixed_step_mode_{false};
// When the minimum step is exceeded, does the integrator throw an exception?
bool min_step_exceeded_throws_{true};
// Statistics; step-size statistics are NaN until the relevant step occurs.
T actual_initial_step_size_taken_{nan()};
T smallest_adapted_step_size_taken_{nan()};
T largest_step_size_taken_{nan()};
int64_t num_steps_taken_{0};
int64_t num_ode_evals_{0};
int64_t num_shrinkages_from_error_control_{0};
int64_t num_shrinkages_from_substep_failures_{0};
int64_t num_substep_failures_{0};
// Applied as diagonal matrices to weight state change variables.
Eigen::VectorXd qbar_weight_, z_weight_;
// State copy for reversion during error-controlled integration.
VectorX<T> xc0_save_;
// The error estimate computed during integration with error control.
std::unique_ptr<ContinuousState<T>> err_est_;
// The pseudo-inverse of the matrix that converts time derivatives of
// generalized coordinates to generalized velocities, multiplied by the
// change in the generalized coordinates (used in state change norm
// calculations).
mutable std::unique_ptr<VectorBase<T>> pinvN_dq_change_;
// Vectors used in state change norm calculations.
mutable VectorX<T> unweighted_substate_change_;
mutable std::unique_ptr<VectorBase<T>> weighted_q_change_;
// Variable for indicating when an integrator has been initialized.
bool initialization_done_{false};
// This is a workaround for an apparent bug in clang 3.8 in which
// defining this as a static constexpr member kNaN failed to instantiate
// properly for the AutoDiffXd instantiation (worked in gcc and MSVC).
// Restore to sanity when some later clang is current.
static constexpr double nan() {
return std::numeric_limits<double>::quiet_NaN();
}
double target_accuracy_{nan()}; // means "unspecified, use default"
T req_initial_step_size_{nan()}; // means "unspecified, use default"
};
} // namespace systems
} // namespace drake
DRAKE_DECLARE_CLASS_TEMPLATE_INSTANTIATIONS_ON_DEFAULT_SCALARS(
class drake::systems::IntegratorBase)
| 0 |
/home/johnshepherd/drake/systems | /home/johnshepherd/drake/systems/analysis/dense_output.h | #pragma once
#include <fmt/format.h>
#include "drake/common/default_scalars.h"
#include "drake/common/drake_copyable.h"
#include "drake/common/eigen_types.h"
namespace drake {
namespace systems {
/// An interface for dense output of ODE solutions, to efficiently approximate
/// them at arbitrarily many points when solving them numerically (see
/// IntegratorBase class documentation).
///
/// Multiple definitions of _dense output_ may be found in literature. For some
/// authors, it refers to the process of repeatedly adjusting the integration
/// step size so that all points to be approximated are directly provided by the
/// integrator (see [Engquist, 2015]). For others, it stands for any numerical
/// approximation technique used to determine the solution in between steps
/// (see [Hairer, 1993]). Despite this caveat, it is common terminology in IVP
/// literature and thus its imparted functionality is immediately clear.
///
/// Herein, the concept in use may be formally stated as follows: given a
/// solution 𝐱(t) ∈ ℝⁿ to an ODE system that is approximated at a discrete
/// set of points 𝐲(tₖ) ∈ ℝⁿ where tₖ ∈ {t₁, ..., tᵢ} with tᵢ ∈ ℝ (e.g. as
/// a result of numerical integration), a dense output of 𝐱(t) is another
/// function 𝐳(t) ∈ ℝⁿ defined for t ∈ [t₁, tᵢ] such that 𝐳(tⱼ) = 𝐲(tⱼ) for
/// all tⱼ ∈ {t₁, ..., tᵢ} and that approximates 𝐱(t) for every value in the
/// closed interval [t₁, tᵢ].
///
/// @warning Dense outputs are, in general, not bound to attain the same
///          accuracy that error-controlled integration schemes do. Check
///          each subclass documentation for further specification.
/// @warning Note that dense outputs do not enforce any algebraic constraints
///          on the solution that integrators might enforce.
///
/// - [Engquist, 2015] B. Engquist. Encyclopedia of Applied and Computational
///                    Mathematics, p. 339, Springer, 2015.
/// - [Hairer, 1993] E. Hairer, S. Nørsett and G. Wanner. Solving Ordinary
///                  Differential Equations I (Nonstiff Problems), p.188,
///                  Springer, 1993.
///
/// @tparam_default_scalar
template <typename T>
class DenseOutput {
 public:
  DRAKE_NO_COPY_NO_MOVE_NO_ASSIGN(DenseOutput)

  virtual ~DenseOutput() = default;

  /// Evaluates the output at the given time @p t.
  /// @param t Time at which to evaluate output.
  /// @returns Output vector value.
  /// @pre Output is not empty i.e. is_empty() equals false.
  /// @throws std::exception if any of the preconditions are not met.
  /// @throws std::exception if given @p t is not within output's domain
  ///         i.e. @p t ∉ [start_time(), end_time()].
  VectorX<T> Evaluate(const T& t) const {
    ThrowIfOutputIsEmpty(__func__);
    ThrowIfTimeIsInvalid(__func__, t);
    return this->DoEvaluate(t);
  }

  /// Evaluates the output value's `n`th scalar element (0-indexed) at the
  /// given time @p t.
  /// @note On some implementations, the computational cost of this
  ///       method may be lower than that of indexing an Evaluate(const T&)
  ///       call return vector value, thus making it the preferred mechanism
  ///       when targeting a single dimension.
  /// @param t Time at which to evaluate output.
  /// @param n The nth scalar element (0-indexed) of the output
  ///          value to evaluate.
  /// @returns Output value's `n`th scalar element (0-indexed).
  /// @pre Output is not empty i.e. is_empty() equals false.
  /// @throws std::exception if any of the preconditions are not met.
  /// @throws std::exception if given @p t is not within output's domain
  ///         i.e. @p t ∉ [start_time(), end_time()].
  /// @throws std::exception if given @p n does not refer to a valid
  ///         output dimension i.e. @p n ∉ [0, size()).
  T EvaluateNth(const T& t, int n) const {
    ThrowIfOutputIsEmpty(__func__);
    ThrowIfNthElementIsInvalid(__func__, n);
    ThrowIfTimeIsInvalid(__func__, t);
    return this->DoEvaluateNth(t, n);
  }

  /// Returns the output size (i.e. the number of elements in an
  /// output value).
  /// @pre Output is not empty i.e. is_empty() equals false.
  /// @throws std::exception if any of the preconditions is not met.
  int size() const {
    ThrowIfOutputIsEmpty(__func__);
    return this->do_size();
  }

  /// Checks whether the output is empty or not.
  bool is_empty() const { return this->do_is_empty(); }

  /// Returns output's start time, or in other words, the oldest time
  /// `t` that it can be evaluated at e.g. via Evaluate().
  /// @pre Output is not empty i.e. is_empty() equals false.
  /// @throws std::exception if any of the preconditions is not met.
  const T& start_time() const {
    ThrowIfOutputIsEmpty(__func__);
    return this->do_start_time();
  }

  /// Returns output's end time, or in other words, the newest time
  /// `t` that it can be evaluated at e.g. via Evaluate().
  /// @pre Output is not empty i.e. is_empty() equals false.
  /// @throws std::exception if any of the preconditions is not met.
  const T& end_time() const {
    ThrowIfOutputIsEmpty(__func__);
    return this->do_end_time();
  }

 protected:
  DenseOutput() = default;

  // @see Evaluate(const T&)
  virtual VectorX<T> DoEvaluate(const T& t) const = 0;

  // The default implementation evaluates the whole output vector and indexes
  // into it; subclasses may override with a cheaper single-element evaluation.
  // @remarks The computational cost of this method must
  //          be less than or equal to that of indexing
  //          DoEvaluate(const T&) return value.
  // @see EvaluateNth(const T&, int)
  virtual T DoEvaluateNth(const T& t, int n) const {
    return this->DoEvaluate(t)(n);
  }

  // @see is_empty()
  virtual bool do_is_empty() const = 0;

  // @see size()
  virtual int do_size() const = 0;

  // @see start_time()
  virtual const T& do_start_time() const = 0;

  // @see end_time()
  virtual const T& do_end_time() const = 0;

  // Asserts that this dense output is not empty.
  // @param func_name Call site name for error message clarity (i.e. __func__).
  // @throws std::exception if output is empty i.e. is_empty() equals true.
  void ThrowIfOutputIsEmpty(const char* func_name) const {
    if (is_empty()) {
      throw std::logic_error(fmt::format(
          "{}(): Dense output is empty.", func_name));
    }
  }

  // Asserts that the given element index @p n is valid for this dense output.
  // @param func_name Call site name for error message clarity (i.e. __func__).
  // @param n The nth scalar element (0-indexed) to be checked.
  // @throws std::exception if given @p n does not refer to a valid
  //         output dimension i.e. @p n ∉ [0, size()).
  void ThrowIfNthElementIsInvalid(const char* func_name, int n) const {
    if (n < 0 || this->do_size() <= n) {
      throw std::runtime_error(fmt::format(
          "{}(): Index {} out of dense output [0, {}) range.",
          func_name, n, this->do_size()));
    }
  }

  // Asserts that the given time @p t is valid for this dense output.
  // @param func_name Call site name for error message clarity (i.e. __func__).
  // @param t Time to be checked.
  // @throws std::exception if given @p t is not within output's domain
  //         i.e. @p t ∉ [start_time(), end_time()].
  void ThrowIfTimeIsInvalid(const char* func_name, const T& t) const {
    if (t < this->do_start_time() || t > this->do_end_time()) {
      throw std::runtime_error(fmt::format(
          "{}(): Time {} out of dense output [{}, {}] domain.",
          func_name, t, this->do_start_time(), this->do_end_time()));
    }
  }
};
} // namespace systems
} // namespace drake
DRAKE_DECLARE_CLASS_TEMPLATE_INSTANTIATIONS_ON_DEFAULT_SCALARS(
class drake::systems::DenseOutput)
| 0 |
/home/johnshepherd/drake/systems | /home/johnshepherd/drake/systems/analysis/simulator_print_stats.h | #pragma once
#include "drake/systems/analysis/simulator.h"
namespace drake {
namespace systems {
/// This method outputs to stdout relevant simulation statistics for a
/// simulator that advanced the state of a system forward in time.
/// @param[in] simulator
///   The simulator to output statistics for.
/// @tparam T The scalar type of the simulator. (Declaration only here; the
///   definition lives in the corresponding .cc file -- presumably instantiated
///   for Drake's default scalars; confirm against that file.)
template <typename T>
void PrintSimulatorStatistics(const Simulator<T>& simulator);
} // namespace systems
} // namespace drake
| 0 |
/home/johnshepherd/drake/systems | /home/johnshepherd/drake/systems/analysis/radau_integrator.cc | #include "drake/systems/analysis/radau_integrator.h"
#include <limits>
#include "drake/common/autodiff.h"
#include "drake/common/fmt_eigen.h"
namespace drake {
namespace systems {
template <typename T, int num_stages>
void RadauIntegrator<T, num_stages>::DoResetImplicitIntegratorStatistics() {
  // Zero the Newton-Raphson iteration counter together with every statistic
  // tracked for the embedded error-estimating (implicit trapezoid) method.
  num_nr_iterations_ = 0;
  num_err_est_nr_iterations_ = 0;
  num_err_est_function_evaluations_ = 0;
  num_err_est_jacobian_function_evaluations_ = 0;
  num_err_est_jacobian_reforms_ = 0;
  num_err_est_iter_factorizations_ = 0;
}
template <typename T, int num_stages>
RadauIntegrator<T, num_stages>::RadauIntegrator(const System<T>& system,
    Context<T>* context) : ImplicitIntegrator<T>(system, context) {
  A_.resize(num_stages, num_stages);

  // TODO(edrumwri) Convert A_, c_, b_, and d_ to fixed-size via "if constexpr".
  if (num_stages == 2) {
    // Two-stage Radau IIA tableau, per [Hairer, 1996] Table 5.5: the Butcher
    // matrix A, the stage times c, and the propagation weights b. The
    // solution-scaling constants d come from (8.2b) and Table 5.6.
    A_ << 5.0/12, -1.0/12,
          3.0/4,   1.0/4;
    c_ = { 1.0/3, 1.0 };
    b_ = { 3.0/4, 1.0/4 };
    d_ = { 0.0, 1.0 };
  } else {
    // The single-stage method degenerates to implicit Euler.
    A_ << 1.0;
    c_ = { 1.0 };
    b_ = { 1.0 };
    d_ = { 1.0 };
  }
}
// Configures this integrator prior to use: allocates workspace, chooses the
// working accuracy and initial step size target, and constructs the embedded
// explicit integrators used when a requested step is below the working
// minimum step size.
template <typename T, int num_stages>
void RadauIntegrator<T, num_stages>::DoInitialize() {
  using std::isnan;

  // Compute the tensor product of A with the identity matrix. A is a
  // num_stages x num_stages matrix. We need the tensor product to be a
  // m x m-dimensional matrix, where m = num_stages * state_dim. Thus the
  // number of rows/columns of the identity matrix is state_dim.
  const int state_dim =
      this->get_context().get_continuous_state_vector().size();

  // Compute A ⊗ I.
  // TODO(edrumwri) The resulting matrix only has s²n non-zeros out of s²n²
  // elements (where s is the number of stages)- take advantage of this.
  A_tp_eye_ = CalcTensorProduct(A_, MatrixX<T>::Identity(state_dim, state_dim));

  // F_of_Z_ holds the stage derivatives, one state_dim-sized segment per
  // stage.
  F_of_Z_.resize(state_dim * num_stages);

  // Allocate storage for changes to state variables during Newton-Raphson.
  dx_state_ = this->get_system().AllocateTimeDerivatives();

  // TODO(edrumwri): Find the best values for the method.
  // These values are expected to be good for the two-stage and one-stage
  // methods, respectively.
  const double kDefaultAccuracy = (num_stages == 2) ? 1e-3 : 1e-1;
  const double kLoosestAccuracy = (num_stages == 2) ? 1e-2 : 5e-1;

  // Set an artificial step size target, if not set already.
  if (isnan(this->get_initial_step_size_target())) {
    // Verify that maximum step size has been set.
    if (isnan(this->get_maximum_step_size())) {
      throw std::logic_error("Neither initial step size target nor maximum "
                             "step size has been set!");
    }

    this->request_initial_step_size_target(
        this->get_maximum_step_size());
  }

  // If the user asks for accuracy that is looser than the loosest this
  // integrator can provide, use the integrator's loosest accuracy setting
  // instead.
  double working_accuracy = this->get_target_accuracy();
  if (isnan(working_accuracy))
    working_accuracy = kDefaultAccuracy;
  else if (working_accuracy > kLoosestAccuracy)
    working_accuracy = kLoosestAccuracy;
  this->set_accuracy_in_use(working_accuracy);

  // Reset the Jacobian matrix (so that recomputation is forced).
  this->get_mutable_jacobian().resize(0, 0);

  // Instantiate the embedded third order Bogacki-Shampine3 integrator. Note
  // that we do not worry about setting the initial step size, since that code
  // will never be triggered (the integrator will always be used in fixed-step
  // mode).
  bs3_ = std::make_unique<BogackiShampine3Integrator<T>>(
      this->get_system(),
      this->get_mutable_context());

  // Instantiate the embedded second-order Runge-Kutta integrator.
  rk2_ = std::make_unique<RungeKutta2Integrator<T>>(
      this->get_system(),
      std::numeric_limits<double>::max() /* no maximum step size */,
      this->get_mutable_context());

  // Maximum step size is not to be a constraint.
  bs3_->set_maximum_step_size(std::numeric_limits<double>::max());

  bs3_->Initialize();
  rk2_->Initialize();
  bs3_->set_fixed_step_mode(true);
  // Note: RK2 only operates in fixed step mode.
}
template <typename T, int num_stages>
const VectorX<T>& RadauIntegrator<T, num_stages>::ComputeFofZ(
    const T& t0, const T& h, const VectorX<T>& xt0, const VectorX<T>& Z) {
  // Evaluates the stage derivatives f(t0 + cᵢh, xt0 + Zᵢ) for each stage i,
  // writing each result into the corresponding segment of F_of_Z_.
  Context<T>* context = this->get_mutable_context();
  const int state_dim = xt0.size();

  for (int stage = 0; stage < num_stages; ++stage) {
    const int offset = stage * state_dim;
    context->SetTimeAndContinuousState(
        t0 + c_[stage] * h, xt0 + Z.segment(offset, state_dim));
    F_of_Z_.segment(offset, state_dim) =
        this->EvalTimeDerivatives(*context).CopyToVector();
  }

  return F_of_Z_;
}
template <typename T, int num_stages>
void RadauIntegrator<T, num_stages>::ComputeSolutionFromIterate(
    const VectorX<T>& xt0, const VectorX<T>& Z, VectorX<T>* xtplus) const {
  const int state_dim = xt0.size();

  // Form x(t0+h) = x(t0) + Σ dᵢ Zᵢ, per (IV.8.2b) in [Hairer, 1996]. The
  // weighted stage sum is accumulated first (in stage order) and x(t0) is
  // added last, matching the original floating-point evaluation order.
  xtplus->setZero();
  for (int stage = 0; stage < num_stages; ++stage) {
    if (d_[stage] != 0.0)
      *xtplus += d_[stage] * Z.segment(stage * state_dim, state_dim);
  }
  *xtplus += xt0;
}
// Performs the Newton-Raphson solve for the Radau stage variables Z over a
// step of size h from (t0, xt0), writing the converged solution into xtplus.
// On convergence failure, recurses with an incremented trial number (up to 4)
// to freshen Jacobians/factorizations, provided reuse is enabled.
template <typename T, int num_stages>
bool RadauIntegrator<T, num_stages>::StepRadau(const T& t0, const T& h,
    const VectorX<T>& xt0, VectorX<T>* xtplus, int trial) {
  using std::max;
  using std::min;

  // Compute the time at the end of the step.
  const T tf = t0 + h;

  // Verify the trial number is valid.
  DRAKE_ASSERT(1 <= trial && trial <= 4);

  // Set the state.
  Context<T>* context = this->get_mutable_context();
  context->SetTimeAndContinuousState(t0, xt0);

  const int state_dim = xt0.size();

  // Verify xtplus
  DRAKE_ASSERT(xtplus && xtplus->size() == state_dim);

  DRAKE_LOGGER_DEBUG("StepRadau() entered for t={}, h={}, trial={}",
                     t0, h, trial);

  // TODO(edrumwri) Experiment with setting this as recommended in
  // [Hairer, 1996], p. 120.
  // Initialize the z iterate using (IV.8.5) in [Hairer, 1996], p. 120 (and
  // the corresponding xt+).
  Z_.setZero(state_dim * num_stages);
  *xtplus = xt0;

  DRAKE_LOGGER_DEBUG("Starting state: {}", fmt_eigen(xtplus->transpose()));

  // Set the iteration matrix construction method.
  auto construct_iteration_matrix = [this](const MatrixX<T>& J, const T& dt,
      typename ImplicitIntegrator<T>::IterationMatrix* iteration_matrix) {
    ComputeRadauIterationMatrix(J, dt, this->A_, iteration_matrix);
  };

  // Calculate Jacobian and iteration matrices (and factorizations), as needed,
  // around (t0, xt0). We do not do this calculation if full Newton is in use;
  // the calculation will be performed at the beginning of the loop
  // instead.
  // TODO(edrumwri) Consider computing the Jacobian matrix around tf and/or
  //                xtplus. This would give a better Jacobian, but would
  //                complicate the logic, since the Jacobian would no longer
  //                (necessarily) be fresh upon fallback to a smaller step size.
  if (!this->get_use_full_newton() &&
      !this->MaybeFreshenMatrices(t0, xt0, h, trial, construct_iteration_matrix,
                                  &iteration_matrix_radau_)) {
    return false;
  }

  // Initialize the "last" norm of dx; this will be used to detect convergence.
  T last_dx_norm = std::numeric_limits<double>::infinity();

  // Do the Newton-Raphson iterations.
  for (int iter = 0; iter < this->max_newton_raphson_iterations(); ++iter) {
    DRAKE_LOGGER_DEBUG("Newton-Raphson iteration {}", iter);

    this->FreshenMatricesIfFullNewton(
        tf, *xtplus, h, construct_iteration_matrix, &iteration_matrix_radau_);

    // Update the number of Newton-Raphson iterations.
    ++num_nr_iterations_;

    // Evaluate the derivatives using the current iterate.
    const VectorX<T>& F_of_Z = ComputeFofZ(t0, h, xt0, Z_);

    // Compute the state update using (IV.8.4) in [Hairer, 1996], p. 119, i.e.:
    // Solve (I − hA⊗J) ΔZᵏ = h (A⊗I) F(Zᵏ) - Zᵏ for ΔZᵏ, where:
    // A_tp_eye ≡ (A⊗I) and (I − hA⊗J) is the iteration matrix.
    DRAKE_LOGGER_DEBUG("residual: {}",
        fmt_eigen((A_tp_eye_ * (h * F_of_Z) - Z_).transpose()));
    VectorX<T> dZ = iteration_matrix_radau_.Solve(
        A_tp_eye_ * (h * F_of_Z) - Z_);

    // Update the iterate.
    Z_ += dZ;

    // Compute the update to the actual continuous state (i.e., x not Z) using
    // (IV.8.2b) in [Hairer, 1996], which gives the relationship between x(t0+h)
    // and Z:
    // x(t0+h) = x(t0) + Σ dᵢ Zᵢ
    // Therefore, we can get the relationship between dZ and dx as:
    // x* = x(t0) + Σ dᵢ Zᵢ                   (1)
    // x+ = x(t0) + Σ dᵢ (Zᵢ + dZᵢ)           (2)
    // Subtracting (1) from (2) yields
    // dx = Σ dᵢ dZᵢ
    // where dx ≡ x+ - x*
    VectorX<T> dx = VectorX<T>::Zero(state_dim);
    for (int i = 0, j = 0; i < num_stages; ++i, j += state_dim) {
      if (d_[i] != 0.0)
        dx += d_[i] * dZ.segment(j, state_dim);
    }

    dx_state_->SetFromVector(dx);
    DRAKE_LOGGER_DEBUG("dx: {}", fmt_eigen(dx.transpose()));

    // Get the infinity norm of the weighted update vector.
    // NOTE(review): this repeats the SetFromVector(dx) performed just above —
    // presumably redundant; confirm before removing.
    dx_state_->get_mutable_vector().SetFromVector(dx);
    T dx_norm = this->CalcStateChangeNorm(*dx_state_);

    // Compute the update.
    ComputeSolutionFromIterate(xt0, Z_, &(*xtplus));

    // Check for Newton-Raphson convergence.
    typename ImplicitIntegrator<T>::ConvergenceStatus status =
        this->CheckNewtonConvergence(iter, *xtplus, dx, dx_norm, last_dx_norm);
    // If it converged, we're done.
    if (status == ImplicitIntegrator<T>::ConvergenceStatus::kConverged)
      return true;
    // If it diverged, we have to abort and try again.
    if (status == ImplicitIntegrator<T>::ConvergenceStatus::kDiverged)
      break;
    // Otherwise, continue to the next Newton-Raphson iteration.
    DRAKE_DEMAND(status ==
                 ImplicitIntegrator<T>::ConvergenceStatus::kNotConverged);

    // Update the norm of the state update.
    last_dx_norm = dx_norm;
  }

  DRAKE_LOGGER_DEBUG("StepRadau() convergence failed");

  // If Jacobian and iteration matrix factorizations are not reused, there
  // is nothing else we can try; otherwise, the following code will recurse
  // into this function again, and freshen computations as helpful. Note that
  // get_reuse() returns false if "full Newton-Raphson" mode is activated (see
  // ImplicitIntegrator::get_use_full_newton()).
  if (!this->get_reuse())
    return false;

  // Try StepRadau again, freshening Jacobians and iteration matrix
  // factorizations as helpful.
  return StepRadau(t0, h, xt0, xtplus, trial+1);
}
template <typename T, int num_stages>
bool RadauIntegrator<T, num_stages>::StepImplicitTrapezoid(const T& t0,
    const T& h, const VectorX<T>& xt0, const VectorX<T>& dx0,
    const VectorX<T>& radau_xtplus, VectorX<T>* xtplus) {
  using std::abs;

  DRAKE_LOGGER_DEBUG("StepImplicitTrapezoid(h={}) t={}",
      h, t0);

  // The residual function for the implicit trapezoid method,
  //   g(x(t+h)) ≡ x(t+h) - x(t) - h/2 (f(t, x(t)) + f(t+h, x(t+h))),
  // evaluated at whatever x(t+h) currently resides in the context.
  Context<T>* context = this->get_mutable_context();
  std::function<VectorX<T>()> g =
      [&xt0, h, &dx0, context, this]() {
        return (context->get_continuous_state().CopyToVector() - xt0 - h/2 *
            (dx0 + this->EvalTimeDerivatives(
                this->get_context()).CopyToVector())).eval();
      };

  // Snapshot the integrator-wide statistics before stepping so that the
  // portion attributable to the error estimator (the implicit trapezoid
  // method) can be separated out afterward.
  const int jacobian_evals_before = this->get_num_jacobian_evaluations();
  const int factorizations_before =
      this->get_num_iteration_matrix_factorizations();
  const int64_t derivative_evals_before =
      this->get_num_derivative_evaluations();
  const int64_t jacobian_derivative_evals_before =
      this->get_num_derivative_evaluations_for_jacobian();
  const int nr_iterations_before = this->get_num_newton_raphson_iterations();

  // Attempt the step.
  const bool success = StepImplicitTrapezoidDetail(
      t0, h, xt0, g, radau_xtplus, xtplus);

  // Attribute the statistics deltas to the implicit trapezoid method.
  num_err_est_jacobian_reforms_ +=
      this->get_num_jacobian_evaluations() - jacobian_evals_before;
  num_err_est_iter_factorizations_ +=
      this->get_num_iteration_matrix_factorizations() - factorizations_before;
  num_err_est_function_evaluations_ +=
      this->get_num_derivative_evaluations() - derivative_evals_before;
  num_err_est_jacobian_function_evaluations_ +=
      this->get_num_derivative_evaluations_for_jacobian() -
      jacobian_derivative_evals_before;
  num_err_est_nr_iterations_ +=
      this->get_num_newton_raphson_iterations() - nr_iterations_before;

  return success;
}
// Does the real work for StepImplicitTrapezoid(): a Newton-Raphson solve of
// the implicit trapezoid equations, starting from the Radau solution, and
// recursing with an incremented trial number (up to 4) on convergence failure
// when matrix reuse is enabled.
template <typename T, int num_stages>
bool RadauIntegrator<T, num_stages>::StepImplicitTrapezoidDetail(
    const T& t0, const T& h,
    const VectorX<T>& xt0, const std::function<VectorX<T>()>& g,
    const VectorX<T>& radau_xtplus, VectorX<T>* xtplus, int trial) {
  using std::max;
  using std::min;

  // Verify the trial number is valid.
  DRAKE_ASSERT(trial >= 1 && trial <= 4);

  // Verify xtplus.
  Context<T>* context = this->get_mutable_context();
  DRAKE_ASSERT(xtplus &&
               xtplus->size() == context->get_continuous_state_vector().size());

  // Start from the Radau solution, which is close (either O(h³) accurate or
  // O(h) accurate, depending on the number of stages) to the true solution and
  // hence should be an excellent starting point.
  *xtplus = radau_xtplus;
  DRAKE_LOGGER_DEBUG("Starting state: {}", fmt_eigen(xtplus->transpose()));

  DRAKE_LOGGER_DEBUG("StepImplicitTrapezoidDetail() entered for t={}, "
      "h={}, trial={}", t0, h, trial);

  // Advance the context time; this means that all derivatives will be computed
  // at t+h. Compare against StepRadau, which uses ComputeFofZ (which
  // automatically updates the Context to the correct time and state).
  const T tf = t0 + h;
  context->SetTimeAndContinuousState(tf, *xtplus);

  // Initialize the "last" state update norm; this will be used to detect
  // convergence.
  T last_dx_norm = std::numeric_limits<double>::infinity();

  // TODO(edrumwri) Consider computing the Jacobian matrix around tf.
  // Calculate Jacobian and iteration matrices (and factorizations), as needed.
  // TODO(edrumwri) Consider computing the Jacobian matrix around xtplus. This
  //                would give a better Jacobian, but would complicate the
  //                logic, since the Jacobian would no longer (necessarily) be
  //                fresh upon fallback to a smaller step size.
  if (!this->get_use_full_newton() &&
      !this->MaybeFreshenMatrices(t0, xt0, h, trial,
                                  ComputeImplicitTrapezoidIterationMatrix,
                                  &iteration_matrix_implicit_trapezoid_)) {
    return false;
  }

  for (int iter = 0; iter < this->max_newton_raphson_iterations(); ++iter) {
    DRAKE_LOGGER_DEBUG("Newton-Raphson iteration {}", iter);
    ++num_nr_iterations_;

    this->FreshenMatricesIfFullNewton(tf, *xtplus, h,
                                      ComputeImplicitTrapezoidIterationMatrix,
                                      &iteration_matrix_implicit_trapezoid_);

    // Evaluate the residual error using the current x(t+h).
    VectorX<T> goutput = g();

    // Compute the state update using the equation A*x = -g(), where A is the
    // iteration matrix.
    // TODO(edrumwri): Allow caller to provide their own solver.
    VectorX<T> dx = iteration_matrix_implicit_trapezoid_.Solve(-goutput);
    DRAKE_LOGGER_DEBUG("dx: {}", fmt_eigen(dx.transpose()));

    // Get the infinity norm of the weighted update vector.
    dx_state_->get_mutable_vector().SetFromVector(dx);
    T dx_norm = this->CalcStateChangeNorm(*dx_state_);

    // Update the state vector.
    *xtplus += dx;
    context->SetTimeAndContinuousState(tf, *xtplus);

    // Check for Newton-Raphson convergence.
    typename ImplicitIntegrator<T>::ConvergenceStatus status =
        this->CheckNewtonConvergence(iter, *xtplus, dx, dx_norm, last_dx_norm);
    // If it converged, we're done.
    if (status == ImplicitIntegrator<T>::ConvergenceStatus::kConverged)
      return true;
    // If it diverged, we have to abort and try again.
    if (status == ImplicitIntegrator<T>::ConvergenceStatus::kDiverged)
      break;
    // Otherwise, continue to the next Newton-Raphson iteration.
    DRAKE_DEMAND(status ==
                 ImplicitIntegrator<T>::ConvergenceStatus::kNotConverged);

    // Update the norm of the state update.
    last_dx_norm = dx_norm;
  }

  DRAKE_LOGGER_DEBUG("StepImplicitTrapezoidDetail() convergence "
      "failed");

  // If Jacobian and iteration matrix factorizations are not reused, there
  // is nothing else we can try. Note that get_reuse() returns false if
  // "full Newton-Raphson" mode is activated (see
  // ImplicitIntegrator::get_use_full_newton()).
  if (!this->get_reuse())
    return false;

  // Try the step again, freshening Jacobians and iteration matrix
  // factorizations as helpful.
  return StepImplicitTrapezoidDetail(
      t0, h, xt0, g, radau_xtplus, xtplus, trial + 1);
}
// Attempts to take a step of size h from (t0, xt0), computing both the Radau
// solution (in xtplus_radau) and the embedded implicit trapezoid solution
// (in xtplus_itr) used for error estimation.
// @returns true iff both implicit solves converge; on success the context is
//          left at (t0 + h, xtplus_radau).
template <typename T, int num_stages>
bool RadauIntegrator<T, num_stages>::AttemptStepPaired(const T& t0, const T& h,
    const VectorX<T>& xt0, VectorX<T>* xtplus_radau, VectorX<T>* xtplus_itr) {
  using std::abs;
  DRAKE_ASSERT(xtplus_radau != nullptr);
  DRAKE_ASSERT(xtplus_itr != nullptr);
  DRAKE_ASSERT(xtplus_radau->size() == xt0.size());
  DRAKE_ASSERT(xtplus_itr->size() == xt0.size());

  // Set the time and state in the context.
  this->get_mutable_context()->SetTimeAndContinuousState(t0, xt0);

  // Compute the derivative at xt0. NOTE: the derivative is calculated at this
  // point (early on in the integration process) in order to reuse the
  // derivative evaluation, via the cache, from the last integration step (if
  // possible).
  const VectorX<T> dx0 = this->EvalTimeDerivatives(
      this->get_context()).CopyToVector();

  // Use the current state as the candidate value for the next state.
  // [Hairer 1996] validates this choice (p. 120).
  *xtplus_radau = xt0;

  // Do the Radau step.
  if (!StepRadau(t0, h, xt0, xtplus_radau)) {
    DRAKE_LOGGER_DEBUG("Radau approach did not converge for "
        "step size {}", h);
    return false;
  }

  // The error estimation process uses the implicit trapezoid method, which
  // is defined as:
  // x(t+h) = x(t) + h/2 (f(t, x(t) + f(t+h, x(t+h))
  // x(t+h) from the Radau method is presumably a good starting point.

  // The error estimate for 2-stage (3rd order) Radau is derived as follows
  // (thanks to Michael Sherman):
  // x*(t+h) = xᵣ₃(t+h) + O(h⁴)      [Radau3]
  //         = xₜ(t+h) + O(h³)       [implicit trapezoid]
  // where x*(t+h) is the true (generally unknown) answer that we seek.
  // This implies:
  // xᵣ₃(t+h) + O(h⁴) = xₜ(t+h) + O(h³)
  // Given that the third order term subsumes the fourth order one:
  // xᵣ₃(t+h) - xₜ(t+h) = O(h³)
  // Therefore the asymptotic term is third order.

  // For 1-stage (1st order) Radau, the error estimate is derived analogously:
  // x*(t+h) = xᵣ₁(t+h) + O(h²)      [Radau1]
  //         = xₜ(t+h) + O(h³)       [implicit trapezoid]
  // By the same reasoning as above, this implies that:
  // xᵣ₁(t+h) - xₜ(t+h) = O(h²)
  // In this case, the asymptotic term is second order.

  // One subtlety in this analysis is that the first case (with Radau3) gives
  // an error estimate for the implicit trapezoid method, while the second case
  // gives an error estimate for the Radau1 method. Put another way: the higher
  // order result is propagated in the 3rd order method while the lower order
  // result is propagated in the 1st order method.

  // Attempt to compute the implicit trapezoid solution.
  if (!StepImplicitTrapezoid(t0, h, xt0, dx0, *xtplus_radau, xtplus_itr)) {
    // NOTE: the message previously read "a stepsize" due to a missing space
    // between the concatenated string literals; fixed here.
    DRAKE_LOGGER_DEBUG("Implicit trapezoid approach FAILED with a step "
        "size that succeeded on Radau3.");
    return false;
  }

  // Reset the state to that computed by Radau3.
  this->get_mutable_context()->SetTimeAndContinuousState(
      t0 + h, *xtplus_radau);
  return true;
  // (The unreachable trailing `return true;` that followed the if/else in the
  // original has been removed.)
}
template <typename T, int num_stages>
void RadauIntegrator<T, num_stages>::ComputeAndSetErrorEstimate(
    const VectorX<T>& xtplus_prop, const VectorX<T>& xtplus_embed) {
  // The error estimate is the componentwise absolute difference between the
  // propagated and the embedded solutions.
  err_est_vec_ = (xtplus_prop - xtplus_embed).cwiseAbs();

  // Compute and set the error estimate.
  DRAKE_LOGGER_DEBUG("Error estimate: {}", fmt_eigen(err_est_vec_.transpose()));
  this->get_mutable_error_estimate()->get_mutable_vector().
      SetFromVector(err_est_vec_);
}
// Takes a single integration step of requested size h. Steps smaller than the
// working minimum step size fall back to explicit methods of matching order
// (BS3 for two stages, Euler+RK2 for one); otherwise the paired Radau /
// implicit trapezoid step is attempted. Returns false (with time and state
// restored) if the implicit step fails to converge.
template <typename T, int num_stages>
bool RadauIntegrator<T, num_stages>::DoImplicitIntegratorStep(const T& h) {
  Context<T>* context = this->get_mutable_context();

  // Save the current time and state.
  const T t0 = context->get_time();
  DRAKE_LOGGER_DEBUG("Radau DoStep(h={}) t={}", h, t0);

  xt0_ = context->get_continuous_state().CopyToVector();
  xtplus_prop_.resize(xt0_.size());
  xtplus_embed_.resize(xt0_.size());

  // If the requested h is less than the minimum step size, we'll advance time
  // using an explicit Bogacki-Shampine/explicit Euler step, depending on the
  // number of stages in use.
  if (h < this->get_working_minimum_step_size()) {
    DRAKE_LOGGER_DEBUG("-- requested step too small, taking explicit "
        "step instead");
    // We want to maintain the order of the error estimation process even as we
    // take this very small step.
    if (num_stages == 2) {
      // The BS3 integrator provides exactly the same order as 2-stage
      // Radau + embedded implicit trapezoid.
      const int evals_before_bs3 = bs3_->get_num_derivative_evaluations();
      DRAKE_DEMAND(bs3_->IntegrateWithSingleFixedStepToTime(t0 + h));
      const int evals_after_bs3 = bs3_->get_num_derivative_evaluations();
      this->get_mutable_error_estimate()->SetFrom(*bs3_->get_error_estimate());
      this->add_derivative_evaluations(evals_after_bs3 - evals_before_bs3);
    } else {
      // First-order Euler + RK2 provides exactly the same order as 1-stage
      // Radau + embedded implicit trapezoid.
      DRAKE_DEMAND(num_stages == 1);

      // Compute the Euler step.
      xdot_ = this->EvalTimeDerivatives(*context).CopyToVector();
      xtplus_prop_ = xt0_ + h * xdot_;

      // Compute the RK2 step.
      const int evals_before_rk2 = rk2_->get_num_derivative_evaluations();
      DRAKE_DEMAND(rk2_->IntegrateWithSingleFixedStepToTime(t0 + h));
      const int evals_after_rk2 = rk2_->get_num_derivative_evaluations();

      // Update the error estimation ODE counts.
      num_err_est_function_evaluations_ += (evals_after_rk2 - evals_before_rk2);

      // Store the embedded solution.
      xtplus_embed_ = context->get_continuous_state().CopyToVector();

      // Reset the state to the propagated solution.
      context->SetTimeAndContinuousState(t0 + h, xtplus_prop_);

      // Update the error estimate.
      ComputeAndSetErrorEstimate(xtplus_prop_, xtplus_embed_);
    }
  } else {
    // Try taking the requested step.
    bool success = AttemptStepPaired(
        t0, h, xt0_, &xtplus_prop_, &xtplus_embed_);

    // If the step was not successful, reset the time and state.
    if (!success) {
      context->SetTimeAndContinuousState(t0, xt0_);
      return false;
    }

    // Update the error estimate.
    ComputeAndSetErrorEstimate(xtplus_prop_, xtplus_embed_);
  }

  return true;
}
// Forms and factors the iteration matrix used by the embedded implicit
// trapezoid error estimator, i.e., I - h/2 ⋅ J.
template <typename T, int num_stages>
void RadauIntegrator<T, num_stages>::ComputeImplicitTrapezoidIterationMatrix(
    const MatrixX<T>& J,
    const T& h,
    typename ImplicitIntegrator<T>::IterationMatrix* iteration_matrix) {
  const int num_state_variables = J.rows();
  // TODO(edrumwri) Investigate how to do the below operation with a move.
  // Note: I + J ⋅ (-h/2) is the same computation as J ⋅ (-h/2) + I.
  iteration_matrix->SetAndFactorIterationMatrix(
      MatrixX<T>::Identity(num_state_variables, num_state_variables) +
      J * (-h / 2.0));
}
// Forms and factors the iteration matrix for the num_stages-stage Radau
// method: I - h ⋅ (A ⊗ J), where ⊗ denotes the Kronecker (tensor) product.
template <typename T, int num_stages>
void RadauIntegrator<T, num_stages>::ComputeRadauIterationMatrix(
    const MatrixX<T>& J,
    const T& h,
    const MatrixX<double>& A,
    typename ImplicitIntegrator<T>::IterationMatrix* iteration_matrix) {
  const int matrix_dim = J.rows() * num_stages;
  // TODO(edrumwri) Investigate how to do the below operation with a move.
  // Computes I - h A ⊗ J, exploiting (-h A) ⊗ J = -h (A ⊗ J).
  iteration_matrix->SetAndFactorIterationMatrix(
      MatrixX<T>::Identity(matrix_dim, matrix_dim) +
      CalcTensorProduct(A * -h, J));
}
// Returns the Kronecker (tensor) product of A and B: each scalar A(i, j) is
// expanded into the scaled block A(i, j) ⋅ B.
template <typename T, int num_stages>
MatrixX<T> RadauIntegrator<T, num_stages>::CalcTensorProduct(
    const MatrixX<T>& A, const MatrixX<T>& B) {
  const int block_rows = B.rows();
  const int block_cols = B.cols();
  MatrixX<T> result(A.rows() * block_rows, A.cols() * block_cols);
  for (int i = 0; i < A.rows(); ++i) {
    for (int j = 0; j < A.cols(); ++j) {
      result.block(i * block_rows, j * block_cols, block_rows, block_cols) =
          A(i, j) * B;
    }
  }
  return result;
}
} // namespace systems
} // namespace drake
// Define class template initializations for double and AutoDiffXd.
// Note: We don't use the macros in drake/common/default_scalars.h because
// those macros are designed for functions with only one template argument, and
// we need to instantiate both scalar types for both the Radau1 and Radau3
// integrators, which have num_stages set to 1 and 2, respectively.
template class drake::systems::RadauIntegrator<double, 1>;
template class drake::systems::RadauIntegrator<drake::AutoDiffXd, 1>;
template class drake::systems::RadauIntegrator<double, 2>;
template class drake::systems::RadauIntegrator<drake::AutoDiffXd, 2>;
| 0 |
/home/johnshepherd/drake/systems | /home/johnshepherd/drake/systems/analysis/batch_eval.cc | #include "drake/systems/analysis/batch_eval.h"
#include <algorithm>
#include <memory>
#include <common_robotics_utilities/parallelism.hpp>
#include "drake/common/default_scalars.h"
namespace drake {
namespace systems {
using common_robotics_utilities::parallelism::DegreeOfParallelism;
using common_robotics_utilities::parallelism::ParallelForBackend;
using common_robotics_utilities::parallelism::StaticParallelForIndexLoop;
// Applies the system's unique periodic discrete update num_time_steps times to
// each (time, state, input) sample, one sample per column, distributing the
// columns across up to parallelize.num_threads() worker threads. Returns a
// matrix with one result column per sample. Throws (via DRAKE_THROW_UNLESS)
// if the system is not a difference-equation system or if any of the input
// matrix dimensions is inconsistent with the system/ports.
template <typename T>
MatrixX<T> BatchEvalUniquePeriodicDiscreteUpdate(
    const System<T>& system, const Context<T>& context,
    const Eigen::Ref<const RowVectorX<T>>& times,
    const Eigen::Ref<const MatrixX<T>>& states,
    const Eigen::Ref<const MatrixX<T>>& inputs, int num_time_steps,
    std::variant<InputPortSelection, InputPortIndex> input_port_index,
    Parallelism parallelize) {
  system.ValidateContext(context);
  // IsDifferenceEquationSystem() writes the system's period into time_step;
  // it is used below to advance the clock between successive updates.
  double time_step{0.0};
  DRAKE_THROW_UNLESS(system.IsDifferenceEquationSystem(&time_step));
  const int num_evals = times.size();
  DRAKE_THROW_UNLESS(states.rows() ==
                     context.get_discrete_state_vector().size());
  DRAKE_THROW_UNLESS(states.cols() == num_evals);
  // May be nullptr (e.g. kNoInput), in which case `inputs` is ignored.
  const InputPort<T>* input_port =
      system.get_input_port_selection(input_port_index);
  if (input_port) {
    DRAKE_THROW_UNLESS(input_port->get_data_type() ==
                       PortDataType::kVectorValued);
    DRAKE_THROW_UNLESS(inputs.rows() == input_port->size());
    DRAKE_THROW_UNLESS(inputs.cols() == num_evals);
  }
  DRAKE_THROW_UNLESS(num_time_steps > 0);
  const int num_threads_to_use = parallelize.num_threads();
  // One scratch context per worker thread, cloned lazily on first use so that
  // threads that never run do not pay for a clone.
  std::vector<std::unique_ptr<Context<T>>> context_pool(num_threads_to_use);
  MatrixX<T> next_states = MatrixX<T>::Zero(states.rows(), num_evals);
  // Each parallel-loop index i reads and writes only column i of next_states,
  // so no synchronization is needed on the shared result matrix.
  const auto calc_next_state = [&](const int thread_num, const int64_t i) {
    if (!context_pool[thread_num]) {
      context_pool[thread_num] = context.Clone();
    }
    next_states.col(i) = states.col(i);
    // The input port stays fixed for all of the steps.
    if (input_port) {
      input_port->FixValue(context_pool[thread_num].get(), inputs.col(i));
    }
    for (int step = 0; step < num_time_steps; ++step) {
      // Set the time and state for this step. The clock advances by one
      // period per update, starting from the sample's time times(i).
      context_pool[thread_num]->SetTime(times(i) + step * time_step);
      context_pool[thread_num]->SetDiscreteState(next_states.col(i));
      next_states.col(i) =
          system.EvalUniquePeriodicDiscreteUpdate(*context_pool[thread_num])
              .value();
    }
  };
  StaticParallelForIndexLoop(DegreeOfParallelism(num_threads_to_use), 0,
                             num_evals, calc_next_state,
                             ParallelForBackend::BEST_AVAILABLE);
  return next_states;
}
// Evaluates xdot = f(t, x, u) at many (time, state, input) samples, one
// sample per column, distributing the columns across up to
// parallelize.num_threads() worker threads. Each worker clones `context`
// lazily on first use; each result column is written by exactly one loop
// index, so the shared result matrix needs no locking.
template <typename T>
MatrixX<T> BatchEvalTimeDerivatives(
    const System<T>& system, const Context<T>& context,
    const Eigen::Ref<const RowVectorX<T>>& times,
    const Eigen::Ref<const MatrixX<T>>& states,
    const Eigen::Ref<const MatrixX<T>>& inputs,
    std::variant<InputPortSelection, InputPortIndex> input_port_index,
    Parallelism parallelize) {
  system.ValidateContext(context);
  const int num_points = times.size();
  DRAKE_THROW_UNLESS(states.rows() == system.num_continuous_states());
  DRAKE_THROW_UNLESS(states.cols() == num_points);
  // May be nullptr (e.g. kNoInput), in which case `inputs` is ignored.
  const InputPort<T>* port = system.get_input_port_selection(input_port_index);
  if (port != nullptr) {
    DRAKE_THROW_UNLESS(port->get_data_type() == PortDataType::kVectorValued);
    DRAKE_THROW_UNLESS(inputs.rows() == port->size());
    DRAKE_THROW_UNLESS(inputs.cols() == num_points);
  }
  const int num_workers = parallelize.num_threads();
  std::vector<std::unique_ptr<Context<T>>> scratch_contexts(num_workers);
  MatrixX<T> derivatives = MatrixX<T>::Zero(states.rows(), num_points);
  const auto eval_one_column = [&](const int thread_num, const int64_t i) {
    std::unique_ptr<Context<T>>& scratch = scratch_contexts[thread_num];
    if (scratch == nullptr) {
      scratch = context.Clone();
    }
    scratch->SetTime(times(i));
    scratch->SetContinuousState(states.col(i));
    if (port != nullptr) {
      port->FixValue(scratch.get(), inputs.col(i));
    }
    derivatives.col(i) =
        system.EvalTimeDerivatives(*scratch).CopyToVector();
  };
  StaticParallelForIndexLoop(DegreeOfParallelism(num_workers), 0, num_points,
                             eval_one_column,
                             ParallelForBackend::BEST_AVAILABLE);
  return derivatives;
}
DRAKE_DEFINE_FUNCTION_TEMPLATE_INSTANTIATIONS_ON_DEFAULT_SCALARS(
(&BatchEvalUniquePeriodicDiscreteUpdate<T>, &BatchEvalTimeDerivatives<T>));
} // namespace systems
} // namespace drake
| 0 |
/home/johnshepherd/drake/systems | /home/johnshepherd/drake/systems/analysis/scalar_initial_value_problem.cc | #include "drake/systems/analysis/scalar_initial_value_problem.h"
namespace drake {
namespace systems {
// Constructs the scalar IVP by adapting the scalar ODE dx/dt = f(t, x; k)
// into the 1-dimensional vector form required by InitialValueProblem.
template <typename T>
ScalarInitialValueProblem<T>::ScalarInitialValueProblem(
    const ScalarOdeFunction& scalar_ode_function, const T& x0,
    const Eigen::Ref<const VectorX<T>>& k) {
  // The wrapper forwards the single state entry to the scalar ODE function
  // and returns its result as a length-1 vector.
  const typename InitialValueProblem<T>::OdeFunction vector_ode_function =
      [scalar_ode_function](const T& t, const VectorX<T>& x,
                            const VectorX<T>& params) -> VectorX<T> {
    return VectorX<T>::Constant(1, scalar_ode_function(t, x[0], params));
  };
  vector_ivp_ = std::make_unique<InitialValueProblem<T>>(
      vector_ode_function, Vector1<T>{x0}, k);
}
// Solves the scalar IVP from t0 to tf by delegating to the underlying
// 1-dimensional vector IVP and extracting its single component.
template <typename T>
T ScalarInitialValueProblem<T>::Solve(const T& t0, const T& tf) const {
  return vector_ivp_->Solve(t0, tf)[0];
}
// Densely solves the scalar IVP from t0 to tf. The request is delegated to
// the vector form of this IVP; the resulting dense output is adapted back to
// scalar form by viewing its only dimension.
template <typename T>
std::unique_ptr<ScalarDenseOutput<T>> ScalarInitialValueProblem<T>::DenseSolve(
    const T& t0, const T& tf) const {
  // The vector IVP is 1-dimensional, so dimension 0 is the scalar solution.
  constexpr int kDimension = 0;
  return std::make_unique<ScalarViewDenseOutput<T>>(
      vector_ivp_->DenseSolve(t0, tf), kDimension);
}
} // namespace systems
} // namespace drake
DRAKE_DEFINE_CLASS_TEMPLATE_INSTANTIATIONS_ON_DEFAULT_NONSYMBOLIC_SCALARS(
class drake::systems::ScalarInitialValueProblem)
| 0 |
/home/johnshepherd/drake/systems | /home/johnshepherd/drake/systems/analysis/hermitian_dense_output.cc | #include "drake/systems/analysis/hermitian_dense_output.h"
DRAKE_DEFINE_CLASS_TEMPLATE_INSTANTIATIONS_ON_DEFAULT_SCALARS(
class drake::systems::HermitianDenseOutput)
| 0 |
/home/johnshepherd/drake/systems | /home/johnshepherd/drake/systems/analysis/bogacki_shampine3_integrator.cc | #include "drake/systems/analysis/bogacki_shampine3_integrator.h"
#include <cmath>
#include <stdexcept>
#include "drake/common/drake_assert.h"
#include "drake/common/unused.h"
namespace drake {
namespace systems {
/*
* Bogacki-Shampine-specific initialization function.
* @throws std::exception if *neither* the initial step size target nor the
* maximum step size have been set before calling.
*/
template <class T>
void BogackiShampine3Integrator<T>::DoInitialize() {
  using std::isnan;
  // Integrator-specific accuracy defaults and limits.
  constexpr double kDefaultAccuracy = 1e-3;
  constexpr double kLoosestAccuracy = 1e-1;
  // Fraction of the maximum step size used for a less aggressive first step.
  constexpr double kMaxStepFraction = 0.1;
  // If no initial step size target was provided, derive one from the maximum
  // step size (which then must have been set).
  if (isnan(this->get_initial_step_size_target())) {
    if (isnan(this->get_maximum_step_size())) {
      throw std::logic_error(
          "Neither initial step size target nor maximum "
          "step size has been set!");
    }
    this->request_initial_step_size_target(
        this->get_maximum_step_size() * kMaxStepFraction);
  }
  // Choose the working accuracy: the user's target, clamped to the loosest
  // accuracy this integrator supports, or the default when unspecified.
  double accuracy = this->get_target_accuracy();
  if (isnan(accuracy)) {
    accuracy = kDefaultAccuracy;
  } else if (accuracy > kLoosestAccuracy) {
    accuracy = kLoosestAccuracy;
  }
  this->set_accuracy_in_use(accuracy);
}
// Takes a single explicit step of size h. The propagated solution is third
// order; the embedded second-order solution (the d row below) supplies the
// data for the error estimate, which is the elementwise absolute difference
// of the two solutions. Always returns true: this explicit integrator cannot
// fail to take its requested step.
template <class T>
bool BogackiShampine3Integrator<T>::DoStep(const T& h) {
  using std::abs;
  Context<T>& context = *this->get_mutable_context();
  const T t0 = context.get_time();
  // CAUTION: This is performance-sensitive inner loop code that uses dangerous
  // long-lived references into state and cache to avoid unnecessary copying and
  // cache invalidation. Be careful not to insert calls to methods that could
  // invalidate any of these references before they are used.
  // We use Butcher tableau notation with labels for each coefficient:
  // 0    (c1) |
  // 1/2  (c2) | 1/2 (a21)
  // 3/4  (c3) |   0 (a31)      3/4 (a32)
  // 1    (c4) | 2/9 (a41)      1/3 (a42)     4/9 (a43)
  // ---------------------------------------------------------------------------
  //             2/9  (b1)      1/3 (b2)      4/9 (b3)     0   (b4)
  //             7/24 (d1)      1/4 (d2)      1/3 (d3)     1/8 (d4)
  // Save the continuous state at t₀.
  context.get_continuous_state_vector().CopyToPreSizedVector(&save_xc0_);
  // Evaluate the derivative at t₀, xc₀.
  derivs1_->get_mutable_vector().SetFrom(
      this->EvalTimeDerivatives(context).get_vector());
  const VectorBase<T>& k1 = derivs1_->get_vector();
  // Cache: k1 references a *copy* of the derivative result so is immune
  // to subsequent evaluations.
  // Compute the first intermediate state and derivative (i.e., Stage 2).
  // This call marks t- and xc-dependent cache entries out of date, including
  // the derivative cache entry. Note that xc is a live reference into the
  // context -- subsequent changes through that reference are unobservable so
  // will require manual out-of-date notifications.
  const double c2 = 1.0 / 2;
  const double a21 = 1.0 / 2;
  VectorBase<T>& xc = context.SetTimeAndGetMutableContinuousStateVector(
      t0 + c2 * h);
  xc.PlusEqScaled(a21 * h, k1);
  // Evaluate the derivative (denoted k2) at t₀ + c2 * h, xc₀ + a21 * h * k1.
  derivs2_->get_mutable_vector().SetFrom(
      this->EvalTimeDerivatives(context).get_vector());
  const VectorBase<T>& k2 = derivs2_->get_vector();  // xcdot⁽ᵃ⁾
  // Cache: k2 references a *copy* of the derivative result so is immune
  // to subsequent evaluations.
  // Compute the second intermediate state and derivative (i.e., Stage 3).
  const double c3 = 3.0 / 4;
  const double a31 = 0.0;
  const double a32 = 3.0 / 4;
  // This call marks t- and xc-dependent cache entries out of date, including
  // the derivative cache entry. (We already have the xc reference but must
  // issue the out-of-date notification here since we're about to change it.)
  context.SetTimeAndNoteContinuousStateChange(t0 + c3 * h);
  // Evaluate the derivative (denoted k3) at t₀ + c3 * h,
  // xc₀ + a31 * h * k1 + a32 * h * k2.
  // Note that a31 is zero, so we leave that term out.
  unused(a31);
  xc.SetFromVector(save_xc0_);  // Restore xc ← xc₀.
  xc.PlusEqScaled({{a32 * h, k2}});
  derivs3_->get_mutable_vector().SetFrom(
      this->EvalTimeDerivatives(context).get_vector());
  const VectorBase<T>& k3 = derivs3_->get_vector();
  // Compute the propagated solution (we're able to do this because b1 = a41,
  // b2 = a42, b3 = a43, and b4 = 0).
  const double c4 = 1.0;
  const double a41 = 2.0 / 9;
  const double a42 = 1.0 / 3;
  const double a43 = 4.0 / 9;
  // This call marks t- and xc-dependent cache entries out of date, including
  // the derivative cache entry. (We already have the xc reference but must
  // issue the out-of-date notification here since we're about to change it.)
  context.SetTimeAndNoteContinuousStateChange(t0 + c4 * h);
  // Evaluate the derivative (denoted k4) at t₀ + c4 * h, xc₀ + a41 * h * k1 +
  // a42 * h * k2 + a43 * h * k3. This will be used to compute the second
  // order solution.
  xc.SetFromVector(save_xc0_);  // Restore xc ← xc₀.
  xc.PlusEqScaled({{a41 * h, k1}, {a42 * h, k2}, {a43 * h, k3}});
  const ContinuousState<T>& derivs4 = this->EvalTimeDerivatives(context);
  const VectorBase<T>& k4 = derivs4.get_vector();
  // WARNING: k4 is a live reference into the cache. Be careful of adding
  // code below that modifies the context until after k4 is used below. In fact,
  // it is best not to modify the context from here on out, as modifying the
  // context will effectively destroy the FSAL benefit that this integrator
  // provides.
  // Compute the second order solution used for the error estimate and then
  // the error estimate itself. The first part of this formula (the part that
  // uses the d coefficients) computes the second error solution. The last part
  // subtracts the third order propagated solution from that second order
  // solution, thereby yielding the error estimate.
  const double d1 = 7.0 / 24;
  const double d2 = 1.0 / 4;
  const double d3 = 1.0 / 3;
  const double d4 = 1.0 / 8;
  // Note: each coefficient below is (bᵢ - dᵢ) ⋅ h, using b4 = 0.
  err_est_vec_->SetZero();
  err_est_vec_->PlusEqScaled({{(a41 - d1) * h, k1},
                              {(a42 - d2) * h, k2},
                              {(a43 - d3) * h, k3},
                              {(-d4) * h, k4}});
  // If the size of the system has changed, the error estimate will no longer
  // be sized correctly. Verify that the error estimate is the correct size.
  DRAKE_DEMAND(this->get_error_estimate()->size() == xc.size());
  // The error estimate is stored as elementwise absolute values.
  this->get_mutable_error_estimate()->SetFromVector(err_est_vec_->
      CopyToVector().cwiseAbs());
  // Bogacki-Shampine always succeeds in taking its desired step.
  return true;
}
} // namespace systems
} // namespace drake
DRAKE_DEFINE_CLASS_TEMPLATE_INSTANTIATIONS_ON_DEFAULT_NONSYMBOLIC_SCALARS(
class ::drake::systems::BogackiShampine3Integrator)
| 0 |
/home/johnshepherd/drake/systems | /home/johnshepherd/drake/systems/analysis/runge_kutta5_integrator.cc | #include "drake/systems/analysis/runge_kutta5_integrator.h"
namespace drake {
namespace systems {
/*
* RK5-specific initialization function.
* @throws std::exception if *neither* the initial step size target nor the
* maximum step size has been set before calling.
*/
template <typename T>
void RungeKutta5Integrator<T>::DoInitialize() {
  using std::isnan;
  // TODO(drum) Verify the integrator-specific accuracy settings below.
  constexpr double kDefaultAccuracy = 1e-5;
  constexpr double kLoosestAccuracy = 1e-3;
  // Fraction of the maximum step size used for a less aggressive first step.
  constexpr double kMaxStepFraction = 0.1;
  // If no initial step size target was provided, derive one from the maximum
  // step size (which then must have been set).
  if (isnan(this->get_initial_step_size_target())) {
    if (isnan(this->get_maximum_step_size())) {
      throw std::logic_error(
          "Neither initial step size target nor maximum "
          "step size has been set!");
    }
    this->request_initial_step_size_target(
        this->get_maximum_step_size() * kMaxStepFraction);
  }
  // Choose the working accuracy: the user's target, clamped to the loosest
  // accuracy this integrator supports, or the default when unspecified.
  double accuracy = this->get_target_accuracy();
  if (isnan(accuracy)) {
    accuracy = kDefaultAccuracy;
  } else if (accuracy > kLoosestAccuracy) {
    accuracy = kLoosestAccuracy;
  }
  this->set_accuracy_in_use(accuracy);
}
// Takes a single explicit step of size h using the Dormand-Prince tableau
// tabulated below. The propagated solution is fifth order; the embedded
// fourth-order solution (the d row) supplies the data for the error estimate,
// which is the elementwise absolute difference of the two solutions. Always
// returns true: this explicit integrator cannot fail to take its requested
// step.
template <typename T>
bool RungeKutta5Integrator<T>::DoStep(const T& h) {
  using std::abs;
  Context<T>& context = *this->get_mutable_context();
  const T t0 = context.get_time();
  const T t1 = t0 + h;
  // CAUTION: This is performance-sensitive inner loop code that uses dangerous
  // long-lived references into state and cache to avoid unnecessary copying and
  // cache invalidation. Be careful not to insert calls to methods that could
  // invalidate any of these references before they are used.
  // We use Butcher tableau notation with labels for each coefficient:
  /*
  0    (c1) |
  1/5  (c2) |        1/5 (a21)
  3/10 (c3) |       3/40 (a31)          9/40 (a32)
  4/5  (c4) |      44/45 (a41)        -56/15 (a42)       32/9 (a43)
  8/9  (c5) | 19372/6561 (a51)   −25360/2187 (a52) 64448/6561 (a53)    −212/729 (a54)                                                   // NOLINT(*)
  1    (c6) |  9017/3168 (a61)       −355/33 (a62) 46732/5247 (a63)      49/176 (a64)  −5103/18656 (a65)                                // NOLINT(*)
  1    (c7) |     35/384 (a71)             0 (a72)   500/1113 (a73)     125/192 (a74)   −2187/6784 (a75)    11/84 (a76)                 // NOLINT(*)
  ------------------------------------------------------------------------------------------------------------------------------------ // NOLINT(*)
                  35/384 (b1)              0 (b2)    500/1113 (b3)      125/192 (b4)    −2187/6784 (b5)     11/84 (b6)      0 (b7)      // NOLINT(*)
              5179/57600 (d1)              0 (d2)  7571/16695 (d3)      393/640 (d4) −92097/339200 (d5)  187/2100 (d6)   1/40 (d7)      // NOLINT(*)
  */
  // Save the continuous state at t₀.
  context.get_continuous_state_vector().CopyToPreSizedVector(&save_xc0_);
  // Evaluate the derivative at t₀, xc₀ and copy the result into a temporary.
  derivs1_->get_mutable_vector().SetFrom(
      this->EvalTimeDerivatives(context).get_vector());
  const VectorBase<T>& k1 = derivs1_->get_vector();
  // Cache: k1 references a *copy* of the derivative result so is immune
  // to subsequent evaluations.
  // Compute the first intermediate state and derivative (i.e., Stage 2).
  // This call marks t- and xc-dependent cache entries out of date, including
  // the derivative cache entry. Note that xc is a live reference into the
  // context -- subsequent changes through that reference are unobservable so
  // will require manual out-of-date notifications.
  const double c2 = 1.0 / 5;
  const double a21 = 1.0 / 5;
  VectorBase<T>& xc =
      context.SetTimeAndGetMutableContinuousStateVector(t0 + c2 * h);
  xc.PlusEqScaled(a21 * h, k1);
  // Evaluate the derivative (denoted k2) at t₀ + c2 * h, xc₀ + a21 * h * k1.
  derivs2_->get_mutable_vector().SetFrom(
      this->EvalTimeDerivatives(context).get_vector());
  const VectorBase<T>& k2 = derivs2_->get_vector();
  // Cache: k2 references a *copy* of the derivative result so is immune
  // to subsequent evaluations.
  // Compute the second intermediate state and derivative (i.e., Stage 3).
  const double c3 = 3.0 / 10;
  const double a31 = 3.0 / 40;
  const double a32 = 9.0 / 40;
  // This call marks t- and xc-dependent cache entries out of date, including
  // the derivative cache entry. (We already have the xc reference but must
  // issue the out-of-date notification here since we're about to change it.)
  context.SetTimeAndNoteContinuousStateChange(t0 + c3 * h);
  // Evaluate the derivative (denoted k3) at t₀ + c3 * h,
  // xc₀ + a31 * h * k1 + a32 * h * k2.
  xc.SetFromVector(save_xc0_);  // Restore xc ← xc₀.
  xc.PlusEqScaled({{a31 * h, k1}, {a32 * h, k2}});
  derivs3_->get_mutable_vector().SetFrom(
      this->EvalTimeDerivatives(context).get_vector());
  const VectorBase<T>& k3 = derivs3_->get_vector();
  // Compute the third intermediate state and derivative (i.e., Stage 4).
  const double c4 = 4.0 / 5;
  const double a41 = 44.0 / 45;
  const double a42 = -56.0 / 15;
  const double a43 = 32.0 / 9;
  // This call marks t- and xc-dependent cache entries out of date, including
  // the derivative cache entry. (We already have the xc reference but must
  // issue the out-of-date notification here since we're about to change it.)
  context.SetTimeAndNoteContinuousStateChange(t0 + c4 * h);
  // Evaluate the derivative (denoted k4) at t₀ + c4 * h,
  // xc₀ + a41 * h * k1 + a42 * h * k2 + a43 * h * k3.
  xc.SetFromVector(save_xc0_);
  xc.PlusEqScaled({{a41 * h, k1}, {a42 * h, k2}, {a43 * h, k3}});
  derivs4_->get_mutable_vector().SetFrom(
      this->EvalTimeDerivatives(context).get_vector());
  const VectorBase<T>& k4 = derivs4_->get_vector();
  // Compute the fourth intermediate state and derivative (i.e., Stage 5).
  const double c5 = 8.0 / 9;
  const double a51 = 19372.0 / 6561;
  const double a52 = -25360.0 / 2187;
  const double a53 = 64448.0 / 6561;
  const double a54 = -212.0 / 729;
  // This call marks t- and xc-dependent cache entries out of date, including
  // the derivative cache entry. (We already have the xc reference but must
  // issue the out-of-date notification here since we're about to change it.)
  context.SetTimeAndNoteContinuousStateChange(t0 + c5 * h);
  // Evaluate the derivative (denoted k5) at t₀ + c5 * h,
  // xc₀ + a51 * h * k1 + a52 * h * k2 + a53 * h * k3 + a54 * h * k4.
  xc.SetFromVector(save_xc0_);  // Restore xc ← xc₀.
  xc.PlusEqScaled({{a51 * h, k1}, {a52 * h, k2}, {a53 * h, k3}, {a54 * h, k4}});
  derivs5_->get_mutable_vector().SetFrom(
      this->EvalTimeDerivatives(context).get_vector());
  const VectorBase<T>& k5 = derivs5_->get_vector();
  // Compute the fifth intermediate state and derivative (i.e., Stage 6).
  const double a61 = 9017.0 / 3168;
  const double a62 = -355.0 / 33;
  const double a63 = 46732.0 / 5247;
  const double a64 = 49.0 / 176;
  const double a65 = -5103.0 / 18656;
  // This call marks t- and xc-dependent cache entries out of date, including
  // the derivative cache entry. (We already have the xc reference but must
  // issue the out-of-date notification here since we're about to change it.)
  context.SetTimeAndNoteContinuousStateChange(t1);
  // Evaluate the derivative (denoted k6) at t₀ + c6 * h,
  // xc₀ + a61 * h * k1 + a62 * h * k2 + a63 * h * k3 + a64 * h * k4 +
  // a65 * h * k5.
  xc.SetFromVector(save_xc0_);
  xc.PlusEqScaled({{a61 * h, k1},
                   {a62 * h, k2},
                   {a63 * h, k3},
                   {a64 * h, k4},
                   {a65 * h, k5}});
  derivs6_->get_mutable_vector().SetFrom(
      this->EvalTimeDerivatives(context).get_vector());
  const VectorBase<T>& k6 = derivs6_->get_vector();
  // Cache: we're about to write through the xc reference again, so need to
  // mark xc-dependent cache entries out of date; time doesn't change here.
  // NOTE(review): this notification looks redundant with the
  // SetTimeAndNoteContinuousStateChange(t1) call just below (nothing writes
  // xc in between) -- confirm before removing.
  context.NoteContinuousStateChange();
  // Compute the propagated solution (we're able to do this because b1 = a71,
  // b2 = a72, b3 = a73, b4 = a74, b5 = a75, and b6 = a76).
  // Note that a72 is 0.0, so we leave that term out below.
  const double a71 = 35.0 / 384;
  const double a73 = 500.0 / 1113;
  const double a74 = 125.0 / 192;
  const double a75 = -2187.0 / 6784;
  const double a76 = 11.0 / 84;
  // This call marks t- and xc-dependent cache entries out of date, including
  // the derivative cache entry. (We already have the xc reference but must
  // issue the out-of-date notification here since we're about to change it.)
  // Note that we use the simplification t1 = t0 + h * c7 = t0 + h * 1.
  context.SetTimeAndNoteContinuousStateChange(t1);
  // Evaluate the derivative (denoted k7) at t₀ + c7 * h,
  // xc₀ + a71 * h * k1 + a72 * h * k2 + a73 * h * k3 + a74 * h * k4 +
  // a75 * h * k5 + a76 * h * k6.
  xc.SetFromVector(save_xc0_);
  xc.PlusEqScaled({{a71 * h, k1},
                   {a73 * h, k3},
                   {a74 * h, k4},
                   {a75 * h, k5},
                   {a76 * h, k6}});
  const ContinuousState<T>& derivs7 = this->EvalTimeDerivatives(context);
  const VectorBase<T>& k7 = derivs7.get_vector();
  // WARNING: k7 is a live reference into the cache. Be careful of adding
  // code below that modifies the context until after k7 is used below. In fact,
  // it is best not to modify the context from here on out, as modifying the
  // context will effectively destroy the FSAL benefit that this integrator
  // provides.
  // Calculate the 4th-order solution that will be used for the error
  // estimate and then the error estimate itself. The part of this
  // formula that uses the "a" coefficients (re)-computes the fifth order
  // solution. The part of this formula that uses the "d" coefficients computes
  // the fourth order solution. The subtraction (and negation) operations
  // yield the error estimate.
  // Note: d2 is 0.0; it has been removed from the formula below.
  const double d1 = 5179.0 / 57600;
  const double d3 = 7571.0 / 16695;
  const double d4 = 393.0 / 640;
  const double d5 = -92097.0 / 339200;
  const double d6 = 187.0 / 2100;
  const double d7 = 1.0 / 40;
  err_est_vec_->SetZero();
  err_est_vec_->PlusEqScaled({{(a71 - d1) * h, k1},
                              {(a73 - d3) * h, k3},
                              {(a74 - d4) * h, k4},
                              {(a75 - d5) * h, k5},
                              {(a76 - d6) * h, k6},
                              {(-d7) * h, k7}});
  // If the size of the system has changed, the error estimate will no longer
  // be sized correctly. Verify that the error estimate is the correct size.
  DRAKE_DEMAND(this->get_error_estimate()->size() == xc.size());
  // The error estimate is stored as elementwise absolute values.
  this->get_mutable_error_estimate()->SetFromVector(
      err_est_vec_->CopyToVector().cwiseAbs());
  // RK5 always succeeds in taking its desired step.
  return true;
}
} // namespace systems
} // namespace drake
DRAKE_DEFINE_CLASS_TEMPLATE_INSTANTIATIONS_ON_DEFAULT_NONSYMBOLIC_SCALARS(
class drake::systems::RungeKutta5Integrator)
| 0 |
/home/johnshepherd/drake/systems | /home/johnshepherd/drake/systems/analysis/runge_kutta3_integrator.h | #pragma once
#include <memory>
#include <utility>
#include "drake/common/default_scalars.h"
#include "drake/common/drake_copyable.h"
#include "drake/systems/analysis/integrator_base.h"
namespace drake {
namespace systems {
/**
* A third-order Runge Kutta integrator with a third order error estimate.
*
* For a discussion of this Runge-Kutta method, see [Butcher, 1987]. The
* embedded error estimate was derived using the method mentioned in
* [Hairer, 1993].
*
* The Butcher tableau for this integrator follows:
* <pre>
* |
* 0 |
* 1/2 | 1/2
* 1 | -1 2
* ---------------------------------------------------------------------------
* 1/6 2/3 1/6
* 0 1 0
* </pre>
* where the second to last row is the 3rd-order propagated solution and
* the last row is the 2nd-order midpoint used for the error estimate.
*
* The following documentation is pulled from Simbody's implementation
* of this integrator:
* "This is a 3-stage, first-same-as-last (FSAL) 3rd order method which
* gives us an embedded 2nd order method as well, so we can extract
* a 3rd-order error estimate for the 2nd-order result, which error
* estimate can then be used for step size control, since it will
* behave as h^3. We then propagate the 3rd order result (whose error
* is unknown), which Hairer calls 'local extrapolation'.
* We call the initial state (t0,y0) and want (t0+h,y1). We are
* given the initial derivative f0=f(t0,y0), which most likely
* is left over from an evaluation at the end of the last step."
*
* - [Butcher, 1987] J. C. Butcher. The Numerical Analysis of Ordinary
* Differential Equations. John Wiley & Sons, 1987. p. 325.
* - [Hairer, 1993] E. Hairer, S. Noersett, and G. Wanner. Solving ODEs I. 2nd
* rev. ed. Springer, 1993. p. 166.
*
* @tparam_nonsymbolic_scalar
* @ingroup integrators
*/
template <class T>
class RungeKutta3Integrator final : public IntegratorBase<T> {
 public:
  DRAKE_NO_COPY_NO_MOVE_NO_ASSIGN(RungeKutta3Integrator)
  ~RungeKutta3Integrator() override = default;
  /**
   * Constructs the integrator for the given system, optionally operating on
   * the given context. Pre-allocates the per-step temporaries (two derivative
   * buffers, the error-estimate scratch vector, and the saved-state vector)
   * sized to the system's time-derivatives dimension.
   */
  explicit RungeKutta3Integrator(const System<T>& system,
                                 Context<T>* context = nullptr)
      : IntegratorBase<T>(system, context) {
    derivs0_ = system.AllocateTimeDerivatives();
    derivs1_ = system.AllocateTimeDerivatives();
    err_est_vec_.resize(derivs0_->size());
    save_xc0_.resize(derivs0_->size());
  }
  /**
   * The integrator supports error estimation.
   */
  bool supports_error_estimation() const override { return true; }
  /// This integrator provides third order error estimates.
  int get_error_estimate_order() const override { return 3; }
 private:
  void DoInitialize() override;
  bool DoStep(const T& h) override;
  // Vector used in error estimate calculations.
  VectorX<T> err_est_vec_;
  // Vector used to save initial value of xc.
  VectorX<T> save_xc0_;
  // These are pre-allocated temporaries for use by integration. They store
  // the derivatives computed at various points within the integration
  // interval.
  std::unique_ptr<ContinuousState<T>> derivs0_, derivs1_;
};
} // namespace systems
} // namespace drake
DRAKE_DECLARE_CLASS_TEMPLATE_INSTANTIATIONS_ON_DEFAULT_NONSYMBOLIC_SCALARS(
class drake::systems::RungeKutta3Integrator)
| 0 |
/home/johnshepherd/drake/systems | /home/johnshepherd/drake/systems/analysis/runge_kutta5_integrator.h | #pragma once
#include <memory>
#include <utility>
#include "drake/common/default_scalars.h"
#include "drake/common/drake_copyable.h"
#include "drake/systems/analysis/integrator_base.h"
namespace drake {
namespace systems {
/**
A fifth-order, seven-stage, first-same-as-last (FSAL) Runge Kutta integrator
with a fourth order error estimate.
For a discussion of this Runge-Kutta method, see [Dormand, 1980] and
[Hairer, 1993]. The embedded error estimate was derived as described
in [Hairer, 1993], where all the coefficients are tabulated.
The Butcher tableau for this integrator follows:
<pre>
0 |
1/5 | 1/5
3/10 | 3/40 9/40
4/5 | 44/45 -56/15 32/9
8/9 | 19372/6561 −25360/2187 64448/6561 −212/729
1 | 9017/3168 −355/33 46732/5247 49/176 −5103/18656
1 | 35/384 0 500/1113 125/192 −2187/6784 11/84 <!-- NOLINT(*) -->
--------------------------------------------------------------------------------- <!-- NOLINT(*) -->
35/384 0 500/1113 125/192 −2187/6784 11/84 0 <!-- NOLINT(*) -->
5179/57600 0 7571/16695 393/640 −92097/339200 187/2100 1/40 <!-- NOLINT(*) -->
</pre>
where the second to last row is the 5th-order (propagated) solution and
the last row gives a 4th-order accurate solution used for error control.
- [Dormand, 1980] J. Dormand and P. Prince. "A family of embedded
Runge-Kutta formulae", Journal of Computational and Applied Mathematics,
1980, 6(1): 19–26.
- [Hairer, 1993] E. Hairer, S. Nørsett, and G. Wanner. Solving ODEs I. 2nd
rev. ed. Springer, 1993. pp. 178-9.
@tparam_nonsymbolic_scalar
@ingroup integrators
*/
template <typename T>
class RungeKutta5Integrator final : public IntegratorBase<T> {
 public:
  DRAKE_NO_COPY_NO_MOVE_NO_ASSIGN(RungeKutta5Integrator)
  ~RungeKutta5Integrator() override = default;
  /**
   * Constructs the integrator for the given system, optionally operating on
   * the given context. Pre-allocates the per-step temporaries (six derivative
   * buffers for the first six stages, the error-estimate scratch vector, and
   * the saved-state vector) sized to the system's time-derivatives dimension.
   */
  explicit RungeKutta5Integrator(const System<T>& system,
                                 Context<T>* context = nullptr)
      : IntegratorBase<T>(system, context) {
    derivs1_ = system.AllocateTimeDerivatives();
    derivs2_ = system.AllocateTimeDerivatives();
    derivs3_ = system.AllocateTimeDerivatives();
    derivs4_ = system.AllocateTimeDerivatives();
    derivs5_ = system.AllocateTimeDerivatives();
    derivs6_ = system.AllocateTimeDerivatives();
    err_est_vec_ = std::make_unique<BasicVector<T>>(derivs1_->size());
    save_xc0_.resize(derivs1_->size());
  }
  /**
   * The integrator supports error estimation.
   */
  bool supports_error_estimation() const override { return true; }
  /// The order of the asymptotic term in the error estimate.
  int get_error_estimate_order() const override { return 4; }
 private:
  void DoInitialize() override;
  bool DoStep(const T& h) override;
  // Vector used in error estimate calculations.
  std::unique_ptr<BasicVector<T>> err_est_vec_;
  // Vector used to save initial value of xc.
  VectorX<T> save_xc0_;
  // These are pre-allocated temporaries for use by integration. They store
  // the derivatives computed at various points within the integration
  // interval.
  std::unique_ptr<ContinuousState<T>> derivs1_, derivs2_, derivs3_, derivs4_,
      derivs5_, derivs6_;
};
} // namespace systems
} // namespace drake
// Declare extern template instantiations for Drake's default non-symbolic
// scalar types; the matching definitions live in the .cc file.
DRAKE_DECLARE_CLASS_TEMPLATE_INSTANTIATIONS_ON_DEFAULT_NONSYMBOLIC_SCALARS(
    class drake::systems::RungeKutta5Integrator)
| 0 |
/home/johnshepherd/drake/systems | /home/johnshepherd/drake/systems/analysis/monte_carlo.h | #pragma once
#include <functional>
#include <memory>
#include <utility>
#include <vector>
#include "drake/common/parallelism.h"
#include "drake/systems/analysis/simulator.h"
namespace drake {
namespace systems {
namespace analysis {
/***
* Defines a factory method that constructs a Simulator (with an owned System)
* using the supplied RandomGenerator as the only source of randomness.
*
* Note that in many interesting cases, the SimulatorFactory may simply ignore
* the RandomGenerator argument and return the Simulator object
* deterministically, because randomness may also be introduced *inside* the
* simulation (by SetRandomContext and/or random input ports).
*
* Also consider that the System referenced by the Simulator returned by this
 * method must have a lifetime that lasts beyond any calls to this Simulator.
* Having the Simulator own the System (by calling the unique_ptr version of
* the constructor) is one convenient solution.
*/
typedef std::function<std::unique_ptr<Simulator<double>>(
RandomGenerator* generator)>
SimulatorFactory;
/***
* Defines an arbitrary scalar function of the Context. This is used in the
* RandomSimulation and MonteCarloSimulation tools below as a way of defining
* the output random variable of interest -- the ScalarSystemFunction is
* evaluated with the final conditions of the simulation and the double that is
* returned is the value of the random variable. (Although there is no
* randomness in the ScalarSystemFunction itself, in the RandomSimulation case
* the final Context will be random, so functions of that context will also be
* random).
*/
typedef std::function<double(const System<double>& system,
const Context<double>& context)>
ScalarSystemFunction;
/**
* Run a deterministic simulation of a (stochastic) System using the @p
* generator to instantiate all "random" quantities.
*
* In pseudo-code, this algorithm implements:
* @code
* simulator = make_simulator(generator)
* simulator.get_system().SetRandomContext(generator)
* simulator.AdvanceTo(final_time)
* return output(simulator.get_context())
* @endcode
*
* @param make_simulator Callers to this method define a stochastic simulation
* by providing the @p make_simulator factory method to return a Simulator
* using the supplied RandomGenerator as the only source of randomness.
* This interface was designed to support cases where the System/Diagram is
 * random (not only the Context), e.g. in the case where a variable number
 * of objects is added to a multibody simulation.
*
* @param output The scalar random variable output, denoted @p output, is
* defined as a function of the Simulator's System's Context, evaluated at
 * the @p final_time. Monte-Carlo investigations that study the details
 * of an entire trajectory can still use this interface, e.g. by including a
* "runtime monitor" System that latches the worst-case deviation of a
 * specification into its Context to be queried at the final time.
*
* @param final_time The time that each instance of the Simulator is
* stepped to. In many cases, this will be equivalent to the duration of
* the simulation, but it need not be because SetRandomContext() could
* initialize the time to a non-zero value, or an event could trigger
* premature termination of the simulation (see #4447).
*
* @param generator Random number generator to be used to generate the
* random samples.
*
* @return the @p output evaluated from the Context at @p final_time.
*
* @ingroup analysis
*/
double RandomSimulation(const SimulatorFactory& make_simulator,
const ScalarSystemFunction& output, double final_time,
RandomGenerator* generator);
/**
* A snapshot of the generator used to produce the random simulation. Use,
* e.g.,
* @code
* RandomGenerator generator(result.generator_snapshot)
* RandomSimulation(make_simulator, output, final_time, &generator)
* @endcode
* for a deterministic playback of the sampled simulation.
 * Note that generator_snapshot satisfies the C++ RandomNumberEngine concept,
 * if you wish to serialize the results.
* Note that performing any non-const operations on generator_snapshot may
* advance the state of the generator and make it no longer capable of
* reproducing the simulation.
*/
struct RandomSimulationResult {
  DRAKE_DEFAULT_COPY_AND_MOVE_AND_ASSIGN(RandomSimulationResult)

  // Captures a copy of the generator's state (taken before the simulation
  // runs) together with the scalar output of the sampled simulation.
  explicit RandomSimulationResult(const RandomGenerator& generator,
                                  double value = 0.0)
      : generator_snapshot(generator), output(value) {}

  // Generator state that reproduces this sample when replayed.
  RandomGenerator generator_snapshot;
  // Value of the scalar random-variable output for this sample.
  double output{};
};
/**
* Generates samples of a scalar random variable output by running many
* random simulations drawn from independent samples of the
* distributions governing the stochastic simulation.
*
* In pseudo-code, this algorithm implements:
* @code
* for i=1:num_samples
* const generator_snapshot = deepcopy(generator)
* output = RandomSimulation(..., generator)
* data(i) = std::pair(generator_snapshot, output)
* return data
* @endcode
*
* @see RandomSimulation() for details about @p make_simulator, @p output,
* and @p final_time.
*
* @param num_samples Number of independent samples to draw from the
* distribution (and equivalently, the number of simulations to run).
*
* @param generator Random number generator to be used to generate the
* random samples. If null, then a new RandomGenerator will be allocated
* and used internally (and repeated calls to this method will return
* identical results). To produce statistically "independent" samples on a
* future call to MonteCarloSimulation, you should make repeated uses of the
* same RandomGenerator object.
*
* @param parallelism Specify number of parallel executions to use while
* performing `num_samples` simulations. The default value (false) specifies
* that simulations should be executed in serial. To use the concurrency
 * available on your hardware, specify either `Parallelism::Max()` or its terse
* abbreviation `true`.
*
* @returns a list of RandomSimulationResult's.
*
* Thread safety when parallel execution is specified:
* - @p make_simulator and @p generator are only accessed from the main thread.
*
* - Each simulator created by @p make_simulator and its context are only
* accessed from within a single worker thread; however, any resource shared
* between these simulators must be safe for concurrent use.
*
* - @p output is called from within worker threads performing simulation with
* the simulator and context belonging to each worker thread. It must be safe
* to make concurrent calls to @p output (i.e. any mutable state inside the
* function must be safe for concurrent use).
*
* @ingroup analysis
*/
// TODO(russt): Consider generalizing this with options (e.g. setting the
// number of simulators, number of samples per simulator, ...).
std::vector<RandomSimulationResult> MonteCarloSimulation(
const SimulatorFactory& make_simulator, const ScalarSystemFunction& output,
double final_time, int num_samples, RandomGenerator* generator = nullptr,
Parallelism parallelism = false);
// The below functions are exposed for unit testing only.
namespace internal {
/* Used by MonteCarloSimulation to select number of threads to use. */
int SelectNumberOfThreadsToUse(int num_parallel_executions);
} // namespace internal
} // namespace analysis
} // namespace systems
} // namespace drake
| 0 |
/home/johnshepherd/drake/systems | /home/johnshepherd/drake/systems/analysis/stepwise_dense_output.h | #pragma once
#include "drake/common/default_scalars.h"
#include "drake/common/drake_copyable.h"
#include "drake/systems/analysis/dense_output.h"
namespace drake {
namespace systems {
/// A DenseOutput class interface extension, geared towards step-wise
/// construction procedures. Outputs of this kind are to be built
/// incrementally by means of discrete updates that extend its domain.
/// Nature of an update remains implementation specific.
///
/// To allow for update rectification (i.e. drop and replacement), in case it
/// fails to meet certain criteria (e.g. not within tolerances), construction
/// can be deferred to a consolidation step. In between consolidations, updates
/// can be rolled back (i.e. discarded) one by one on a last-input-first-output
/// basis. Implementations are thus encouraged to keep recent updates in a light
/// weight form, deferring heavier computations and construction of a better
/// suited representation for evaluation. As such, evaluation is bound to
/// succeed only after consolidation.
///
/// @tparam_default_scalar
template <typename T>
class StepwiseDenseOutput : public DenseOutput<T> {
 public:
  DRAKE_NO_COPY_NO_MOVE_NO_ASSIGN(StepwiseDenseOutput)

  ~StepwiseDenseOutput() override = default;

  /// Rolls back (drops) the last update.
  /// @remarks This process is irreversible.
  /// @pre Updates have taken place since instantiation or last
  ///      consolidation (via Consolidate()).
  /// @throws std::exception if any of the preconditions is not met.
  virtual void Rollback() = 0;

  /// Consolidates latest updates.
  ///
  /// All updates since last call or construction are put into a form
  /// that is suitable for evaluation.
  ///
  /// @remarks This process is irreversible.
  /// @pre Updates have taken place since instantiation or last
  ///      consolidation.
  /// @post The extents covered by updates since instantiation or
  ///       last consolidation can be evaluated (via Evaluate()).
  /// @post Time extents covered by updates can be evaluated
  ///       (via start_time()/end_time()).
  /// @throws std::exception if any of the preconditions is not met.
  virtual void Consolidate() = 0;

 protected:
  // This class is an abstract interface; only derived classes may construct.
  StepwiseDenseOutput() = default;
};
} // namespace systems
} // namespace drake
// Declare extern template instantiations for Drake's default scalar types;
// the matching definitions live in the .cc file.
DRAKE_DECLARE_CLASS_TEMPLATE_INSTANTIATIONS_ON_DEFAULT_SCALARS(
    class drake::systems::StepwiseDenseOutput)
| 0 |
/home/johnshepherd/drake/systems | /home/johnshepherd/drake/systems/analysis/explicit_euler_integrator.cc | #include "drake/systems/analysis/explicit_euler_integrator.h"
// Instantiate ExplicitEulerIntegrator for Drake's default scalar types.
DRAKE_DEFINE_CLASS_TEMPLATE_INSTANTIATIONS_ON_DEFAULT_SCALARS(
    class drake::systems::ExplicitEulerIntegrator)
| 0 |
/home/johnshepherd/drake/systems | /home/johnshepherd/drake/systems/analysis/stepwise_dense_output.cc | #include "drake/systems/analysis/stepwise_dense_output.h"
// Instantiate StepwiseDenseOutput for Drake's default scalar types.
DRAKE_DEFINE_CLASS_TEMPLATE_INSTANTIATIONS_ON_DEFAULT_SCALARS(
    class drake::systems::StepwiseDenseOutput)
| 0 |
/home/johnshepherd/drake/systems | /home/johnshepherd/drake/systems/analysis/implicit_euler_integrator.cc | #include "drake/systems/analysis/implicit_euler_integrator.h"
#include <algorithm>
#include <cmath>
#include <limits>
#include <stdexcept>
#include <utility>
#include "drake/common/drake_assert.h"
#include "drake/common/fmt_eigen.h"
#include "drake/common/text_logging.h"
#include "drake/systems/analysis/runge_kutta2_integrator.h"
namespace drake {
namespace systems {
template <class T>
void ImplicitEulerIntegrator<T>::DoResetImplicitIntegratorStatistics() {
  // Zero the Newton-Raphson iteration count and both per-method statistics
  // bundles (implicit-trapezoid and half-sized-implicit-Euler).
  itr_statistics_ = {};
  hie_statistics_ = {};
  num_nr_iterations_ = 0;
}
template <class T>
void ImplicitEulerIntegrator<T>::DoResetCachedJacobianRelatedMatrices() {
  // Discard both factored iteration matrices so that they will be rebuilt
  // on demand the next time a step is attempted.
  itr_iteration_matrix_ = {};
  ie_iteration_matrix_ = {};
}
template <class T>
void ImplicitEulerIntegrator<T>::DoInitialize() {
  using std::isnan;

  // Scratch storage for the state change computed on each Newton-Raphson
  // iterate.
  dx_state_ = this->get_system().AllocateTimeDerivatives();

  // Accuracy bounds appropriate for this particular integrator.
  const double kDefaultAccuracy = 1e-1;
  const double kLoosestAccuracy = 5e-1;

  // When no initial step size target was provided, fall back to the maximum
  // step size -- which must then have been set by the user.
  if (isnan(this->get_initial_step_size_target())) {
    if (isnan(this->get_maximum_step_size())) {
      throw std::logic_error(
          "Neither initial step size target nor maximum "
          "step size has been set!");
    }
    this->request_initial_step_size_target(this->get_maximum_step_size());
  }

  // Determine the working accuracy: substitute the default when the user
  // supplied none, otherwise tighten anything looser than the loosest
  // accuracy this integrator can provide.
  double accuracy = this->get_target_accuracy();
  accuracy = isnan(accuracy) ? kDefaultAccuracy
                             : std::min(accuracy, kLoosestAccuracy);
  this->set_accuracy_in_use(accuracy);

  // Empty the Jacobian matrix so that recomputation is forced.
  this->get_mutable_jacobian().resize(0, 0);

  // Construct the embedded second order Runge-Kutta integrator. Its maximum
  // step size is set to infinity because this integrator explicitly requests
  // each step size to be taken.
  rk2_ = std::make_unique<RungeKutta2Integrator<T>>(
      this->get_system(),
      std::numeric_limits<double>::infinity() /* maximum step size */,
      this->get_mutable_context());
}
template <class T>
void ImplicitEulerIntegrator<T>::ComputeAndFactorImplicitEulerIterationMatrix(
    const MatrixX<T>& J, const T& h,
    typename ImplicitIntegrator<T>::IterationMatrix* iteration_matrix) {
  const int num_rows = J.rows();
  // Form and factor the iteration matrix I - h*J. The expression is written
  // as (-h)*J + I to sidestep the O(n^2) subtraction that the naive
  // MatrixX<T>::Identity(n, n) - J * h would incur.
  // TODO(edrumwri) Investigate using a move-type operation below.
  iteration_matrix->SetAndFactorIterationMatrix(
      J * -h + MatrixX<T>::Identity(num_rows, num_rows));
}
template <class T>
void ImplicitEulerIntegrator<T>::
    ComputeAndFactorImplicitTrapezoidIterationMatrix(
        const MatrixX<T>& J, const T& h,
        typename ImplicitIntegrator<T>::IterationMatrix* iteration_matrix) {
  const int num_rows = J.rows();
  // Form and factor the iteration matrix I - (h/2)*J. The expression is
  // written as (-h/2)*J + I to sidestep the O(n^2) subtraction that the naive
  // MatrixX<T>::Identity(n, n) - J * h / 2 would incur.
  // TODO(edrumwri) Investigate using a move-type operation below.
  iteration_matrix->SetAndFactorIterationMatrix(
      J * (-h / 2.0) + MatrixX<T>::Identity(num_rows, num_rows));
}
// Solves the nonlinear system g(x(t0+h)) = 0 via Newton-Raphson, shared by
// the implicit Euler and implicit trapezoid stepping routines.
// @param t0 the time at the left end of the integration interval.
// @param h the step size to attempt.
// @param xt0 the continuous state at t0.
// @param g the residual function whose root (the step solution) is sought.
// @param compute_and_factor_iteration_matrix forms/factors the Newton
//        iteration matrix from a Jacobian and step size.
// @param xtplus_guess the starting iterate for x(t0+h).
// @param iteration_matrix the cached iteration matrix and factorization.
// @param[out] xtplus on success, the computed x(t0+h).
// @param trial attempt number in [1, 4]; on convergence failure the method
//        recurses with trial+1, which requests fresher Jacobian data (see
//        MaybeFreshenMatrices()).
// @returns true iff the Newton-Raphson iteration converged.
template <class T>
bool ImplicitEulerIntegrator<T>::StepAbstract(
    const T& t0, const T& h, const VectorX<T>& xt0,
    const std::function<VectorX<T>()>& g,
    const std::function<void(const MatrixX<T>&, const T&,
                             typename ImplicitIntegrator<T>::IterationMatrix*)>&
        compute_and_factor_iteration_matrix,
    const VectorX<T>& xtplus_guess,
    typename ImplicitIntegrator<T>::IterationMatrix* iteration_matrix,
    VectorX<T>* xtplus, int trial) {
  using std::max;
  using std::min;

  // Verify the trial number is valid.
  DRAKE_ASSERT(trial >= 1 && trial <= 4);

  // Verify xtplus is non-null and sized consistently with the initial state.
  DRAKE_ASSERT(xtplus && xtplus->size() == xt0.size());

  DRAKE_LOGGER_DEBUG("StepAbstract() entered for t={}, h={}, trial={}",
                     t0, h, trial);

  // Start from the guess.
  *xtplus = xtplus_guess;
  DRAKE_LOGGER_DEBUG("Starting state: {}", fmt_eigen(xtplus->transpose()));

  // Advance the context time and state to compute derivatives at t0 + h.
  const T tf = t0 + h;
  Context<T>* context = this->get_mutable_context();
  context->SetTimeAndContinuousState(tf, *xtplus);

  // Initialize the "last" state update norm; this will be used to detect
  // convergence.
  T last_dx_norm = std::numeric_limits<double>::infinity();

  // Calculate Jacobian and iteration matrices (and factorizations), as needed,
  // around (t0, xt0). We do not do this calculation if full Newton is in use;
  // the calculation will be performed at the beginning of the loop instead.
  // TODO(edrumwri) Consider computing the Jacobian matrix around tf and/or
  //                xtplus. This would give a better Jacobian, but would
  //                complicate the logic, since the Jacobian would no longer
  //                (necessarily) be fresh upon fallback to a smaller step size.
  if (!this->get_use_full_newton() &&
      !this->MaybeFreshenMatrices(t0, xt0, h, trial,
                                  compute_and_factor_iteration_matrix,
                                  iteration_matrix)) {
    return false;
  }

  // Do the Newton-Raphson iterations.
  for (int i = 0; i < this->max_newton_raphson_iterations(); ++i) {
    this->FreshenMatricesIfFullNewton(tf, *xtplus, h,
                                      compute_and_factor_iteration_matrix,
                                      iteration_matrix);

    // Evaluate the residual error using:
    // g(x(t0+h)) = x(t0+h) - x(t0) - h f(t0+h,x(t0+h)).
    VectorX<T> goutput = g();

    // Update the number of Newton-Raphson iterations.
    num_nr_iterations_++;

    // Compute the state update using the equation A*x = -g(), where A is the
    // iteration matrix.
    // TODO(edrumwri): Allow caller to provide their own solver.
    VectorX<T> dx = iteration_matrix->Solve(-goutput);

    // Get the infinity norm of the weighted update vector.
    dx_state_->get_mutable_vector().SetFromVector(dx);
    T dx_norm = this->CalcStateChangeNorm(*dx_state_);

    // Update the state vector.
    *xtplus += dx;
    context->SetTimeAndContinuousState(tf, *xtplus);

    // Check for Newton-Raphson convergence.
    typename ImplicitIntegrator<T>::ConvergenceStatus status =
        this->CheckNewtonConvergence(i, *xtplus, dx, dx_norm, last_dx_norm);
    // If it converged, we're done.
    if (status == ImplicitIntegrator<T>::ConvergenceStatus::kConverged)
      return true;
    // If it diverged, we have to abort and try again.
    if (status == ImplicitIntegrator<T>::ConvergenceStatus::kDiverged)
      break;
    // Otherwise, continue to the next Newton-Raphson iteration.
    DRAKE_DEMAND(status ==
                 ImplicitIntegrator<T>::ConvergenceStatus::kNotConverged);

    // Update the norm of the state update.
    last_dx_norm = dx_norm;
  }

  DRAKE_LOGGER_DEBUG("StepAbstract() convergence failed");

  // If Jacobian and iteration matrix factorizations are not reused, there
  // is nothing else we can try. Note that get_reuse() returns false if
  // "full Newton-Raphson" mode is activated (see
  // ImplicitIntegrator::get_use_full_newton()).
  if (!this->get_reuse())
    return false;

  // Try StepAbstract again. That method will freshen Jacobians and iteration
  // matrix factorizations as necessary.
  return StepAbstract(t0, h, xt0, g, compute_and_factor_iteration_matrix,
                      xtplus_guess, iteration_matrix, xtplus, trial + 1);
}
template <class T>
bool ImplicitEulerIntegrator<T>::StepImplicitEuler(const T& t0, const T& h,
    const VectorX<T>& xt0, VectorX<T>* xtplus) {
  DRAKE_LOGGER_DEBUG("StepImplicitEuler(h={}) t={}", h, t0);

  // Seed the Newton-Raphson iteration with the current state; [Hairer 1996]
  // validates this choice of guess (p. 120).
  return this->StepImplicitEulerWithGuess(t0, h, xt0, /* xtplus_guess = */ xt0,
                                          xtplus);
}
template <class T>
bool ImplicitEulerIntegrator<T>::StepImplicitEulerWithGuess(
    const T& t0, const T& h, const VectorX<T>& xt0,
    const VectorX<T>& xtplus_guess, VectorX<T>* xtplus) {
  using std::abs;
  DRAKE_LOGGER_DEBUG("StepImplicitEulerWithGuess(h={}) t={}", h, t0);

  // Residual for the implicit Euler method:
  // g(x(t0+h)) = x(t0+h) - x(t0) - h f(t0+h, x(t0+h)),
  // evaluated at the state currently held by the context.
  Context<T>* context = this->get_mutable_context();
  std::function<VectorX<T>()> residual = [&xt0, h, context,
                                          this]() -> VectorX<T> {
    return context->get_continuous_state().CopyToVector() - xt0 -
           h * this->EvalTimeDerivatives(*context).CopyToVector();
  };

  // Attempt the step.
  return StepAbstract(t0, h, xt0, residual,
                      ComputeAndFactorImplicitEulerIterationMatrix,
                      xtplus_guess, &ie_iteration_matrix_, xtplus);
}
// Takes two half-sized implicit Euler steps (t0 -> t0+h/2 -> t0+h), whose
// result is differenced against the full-sized step for error estimation.
// @param xtplus_ie the already-computed full-step implicit Euler solution;
//        its average with xt0 seeds the first half step and it seeds the
//        second half step directly.
// @param[out] xtplus on success, the state at t0+h from the two half steps.
// @returns true iff both half-sized steps converged.
template <class T>
bool ImplicitEulerIntegrator<T>::StepHalfSizedImplicitEulers(
    const T& t0, const T& h, const VectorX<T>& xt0,
    const VectorX<T>& xtplus_ie, VectorX<T>* xtplus) {
  using std::abs;
  DRAKE_LOGGER_DEBUG("StepHalfSizedImplicitEulers(h={}) t={}", h, t0);

  // Store statistics before calling StepAbstract(). The difference between
  // the modified statistics and the stored statistics will be used to compute
  // the half-sized-implicit-Euler-specific statistics.
  const int stored_num_jacobian_evaluations =
      this->get_num_jacobian_evaluations();
  const int stored_num_iter_factorizations =
      this->get_num_iteration_matrix_factorizations();
  const int64_t stored_num_function_evaluations =
      this->get_num_derivative_evaluations();
  const int64_t stored_num_jacobian_function_evaluations =
      this->get_num_derivative_evaluations_for_jacobian();
  const int stored_num_nr_iterations =
      this->get_num_newton_raphson_iterations();

  // We set our guess for the state after a half-step to the average of the
  // guess for the final state, xtplus_ie, and the initial state, xt0.
  VectorX<T> xtmp = 0.5 * (xt0 + xtplus_ie);

  // Attempt to step.
  bool success = StepImplicitEulerWithGuess(t0, 0.5 * h, xt0, xtmp, xtplus);
  if (!success) {
    DRAKE_LOGGER_DEBUG("First Half IE convergence failed.");
  } else {
    // Swap the current output, xtplus, into xthalf, which functions as the new
    // xⁿ.
    std::swap(xtmp, *xtplus);
    const VectorX<T>& xthalf = xtmp;

    // Since the first half-step succeeded, either we recomputed a Jacobian at
    // (t0, x0), or we reused an older Jacobian. Therefore, as far as the next
    // half-sized step is concerned, the Jacobian is not at state
    // (t+h/2, x(t+h/2)). Since jacobian_is_fresh_ means that the Jacobian is
    // computed at the (t0,x0) of the beginning of the step we want to take, we
    // mark it as not-fresh.
    this->set_jacobian_is_fresh(false);

    // TODO(antequ): One possible optimization is, if the Jacobian is fresh at
    // this point, we can set a flag to cache the Jacobian if it gets
    // recomputed, so that if the second substep fails, we simply restore the
    // cached Jacobian instead of marking it stale. Since the second substep
    // very rarely fails if the large step and the first substep succeeded,
    // our tests indicates that this optimization saves only about 2% of the
    // effort (0-5% in most cases), on a stiff 3-body pile of objects example.
    // Therefore we omitted this optimization for code simplicity. See
    // Revision 1 of PR 13224 for an implementation of this optimization.
    success = StepImplicitEulerWithGuess(t0 + 0.5 * h, 0.5 * h, xthalf,
                                         xtplus_ie, xtplus);
    if (!success) {
      DRAKE_LOGGER_DEBUG("Second Half IE convergence failed.");

      // After a failure, the Jacobians were updated, so we have to mark that
      // the current Jacobian is not fresh by setting
      // failed_jacobian_is_from_second_small_step_ to true, so that at the
      // beginning of the next step, we know to set jacobian_is_fresh_ to
      // false, in DoImplicitIntegratorStep(). (Note that here we were slightly
      // abusing the jacobian_is_fresh_ flag --- for the second half-sized step,
      // we called MaybeFreshenMatrices() at t = t0 + h/2, meaning that
      // jacobian_is_fresh_ now marks whether the Jacobian was last computed at
      // t = t0 + h/2 instead of its usual definition of t = t0. This is why
      // when the step fails, ImplicitIntegrator<T>::DoStep() will incorrectly
      // mark the Jacobian as fresh, and we will need to fix this in
      // DoImplicitIntegratorStep() for the next step.)
      failed_jacobian_is_from_second_small_step_ = true;
    }
  }

  // Move statistics to half-sized-implicit-Euler-specific statistics.
  // Notice that we log the statistics even if either step fails.
  hie_statistics_.num_jacobian_reforms +=
      this->get_num_jacobian_evaluations() - stored_num_jacobian_evaluations;
  hie_statistics_.num_iter_factorizations +=
      this->get_num_iteration_matrix_factorizations() -
      stored_num_iter_factorizations;
  hie_statistics_.num_function_evaluations +=
      this->get_num_derivative_evaluations() - stored_num_function_evaluations;
  hie_statistics_.num_jacobian_function_evaluations +=
      this->get_num_derivative_evaluations_for_jacobian() -
      stored_num_jacobian_function_evaluations;
  hie_statistics_.num_nr_iterations +=
      this->get_num_newton_raphson_iterations() - stored_num_nr_iterations;

  return success;
}
// Steps the system forward by h from (t0, xt0) using the implicit trapezoid
// method, seeded with the implicit Euler solution xtplus_ie; the result is
// used for error estimation only.
// @param dx0 the time derivative already evaluated at (t0, xt0).
// @param[out] xtplus on success, the implicit trapezoid solution at t0+h.
// @returns true iff the Newton-Raphson iteration converged.
template <class T>
bool ImplicitEulerIntegrator<T>::StepImplicitTrapezoid(
    const T& t0, const T& h, const VectorX<T>& xt0, const VectorX<T>& dx0,
    const VectorX<T>& xtplus_ie, VectorX<T>* xtplus) {
  using std::abs;
  DRAKE_LOGGER_DEBUG("StepImplicitTrapezoid(h={}) t={}", h, t0);

  // Set g for the implicit trapezoid method.
  // Define g(x(t+h)) ≡ x(t0+h) - x(t0) - h/2 (f(t0,x(t0)) + f(t0+h,x(t0+h)) and
  // evaluate it at the current x(t+h).
  Context<T>* context = this->get_mutable_context();
  std::function<VectorX<T>()> g =
      [&xt0, h, &dx0, context, this]() {
        return (context->get_continuous_state().CopyToVector() - xt0 - h/2 *
            (dx0 + this->EvalTimeDerivatives(
                *context).CopyToVector().eval())).eval();
      };

  // Store statistics before calling StepAbstract(). The difference between
  // the modified statistics and the stored statistics will be used to compute
  // the trapezoid method-specific statistics.
  const int stored_num_jacobian_evaluations =
      this->get_num_jacobian_evaluations();
  const int stored_num_iter_factorizations =
      this->get_num_iteration_matrix_factorizations();
  const int64_t stored_num_function_evaluations =
      this->get_num_derivative_evaluations();
  const int64_t stored_num_jacobian_function_evaluations =
      this->get_num_derivative_evaluations_for_jacobian();
  const int stored_num_nr_iterations =
      this->get_num_newton_raphson_iterations();

  // Attempt to step.
  bool success = StepAbstract(t0, h, xt0, g,
                              ComputeAndFactorImplicitTrapezoidIterationMatrix,
                              xtplus_ie, &itr_iteration_matrix_, xtplus);

  // Move statistics to implicit trapezoid-specific.
  // Notice that we log the statistics even if the step fails.
  itr_statistics_.num_jacobian_reforms +=
      this->get_num_jacobian_evaluations() - stored_num_jacobian_evaluations;
  itr_statistics_.num_iter_factorizations +=
      this->get_num_iteration_matrix_factorizations() -
      stored_num_iter_factorizations;
  itr_statistics_.num_function_evaluations +=
      this->get_num_derivative_evaluations() - stored_num_function_evaluations;
  itr_statistics_.num_jacobian_function_evaluations +=
      this->get_num_derivative_evaluations_for_jacobian() -
      stored_num_jacobian_function_evaluations;
  itr_statistics_.num_nr_iterations +=
      this->get_num_newton_raphson_iterations() - stored_num_nr_iterations;

  return success;
}
// Attempts the paired step used for error estimation: the full-sized implicit
// Euler step plus either two half-sized implicit Euler steps or an implicit
// trapezoid step (per use_implicit_trapezoid_error_estimation_).
// @param[out] xtplus_ie the full-step implicit Euler solution at t0+h.
// @param[out] xtplus_hie the error-estimation solution at t0+h.
// @returns true iff both members of the pair converged.
template <class T>
bool ImplicitEulerIntegrator<T>::AttemptStepPaired(const T& t0, const T& h,
    const VectorX<T>& xt0, VectorX<T>* xtplus_ie, VectorX<T>* xtplus_hie) {
  using std::abs;
  DRAKE_ASSERT(xtplus_ie != nullptr);
  DRAKE_ASSERT(xtplus_hie != nullptr);

  // Compute the derivative at time and state (t0, x(t0)). NOTE: the derivative
  // is calculated at this point (early on in the integration process) in order
  // to reuse the derivative evaluation, via the cache, from the last
  // integration step (if possible).
  const VectorX<T> dx0 = this->EvalTimeDerivatives(
      this->get_context()).CopyToVector();

  // Do the Euler step.
  if (!StepImplicitEuler(t0, h, xt0, xtplus_ie)) {
    DRAKE_LOGGER_DEBUG("Implicit Euler approach did not converge for "
                       "step size {}", h);
    return false;
  }

  if (!use_implicit_trapezoid_error_estimation_) {
    // In this case, step two half-sized implicit Euler steps along
    // with the full step for error estimation.
    if (StepHalfSizedImplicitEulers(t0, h, xt0, *xtplus_ie, xtplus_hie)) {
      Context<T>* context = this->get_mutable_context();
      context->SetTimeAndContinuousState(t0 + h, *xtplus_hie);
      return true;
    } else {
      // NOTE: a space was missing between the concatenated string literals
      // here ("...a stepsize..."); fixed to "...a step size...".
      DRAKE_LOGGER_DEBUG("Implicit Euler half-step approach FAILED with a "
                         "step size that succeeded on full-sized implicit "
                         "Euler.");
      return false;
    }
  } else {
    // In this case, use the implicit trapezoid method, which is defined as:
    // x(t0+h) = x(t0) + h/2 (f(t0, x(t0)) + f(t0+h, x(t0+h)))
    // x(t0+h) from the implicit Euler method is presumably a good starting
    // point.

    // The error estimate is derived as follows (thanks to Michael Sherman):
    // x*(t0+h) = xᵢₑ(t0+h) + O(h²)      [implicit Euler]
    //          = xₜᵣ(t0+h) + O(h³)      [implicit trapezoid]
    // where x*(t0+h) is the true (generally unknown) answer that we seek.
    // This implies:
    // xᵢₑ(t0+h) + O(h²) = xₜᵣ(t0+h) + O(h³).
    // Given that the second order term subsumes the third order one, we have:
    // xᵢₑ(t0+h) - xₜᵣ(t0+h) = O(h²).

    // Attempt to compute the implicit trapezoid solution.
    if (StepImplicitTrapezoid(t0, h, xt0, dx0, *xtplus_ie, xtplus_hie)) {
      // Reset the state to that computed by implicit Euler.
      // TODO(edrumwri): Explore using the implicit trapezoid method solution
      //                 instead as *the* solution, rather than the implicit
      //                 Euler. Refer to [Lambert, 1991], Ch 6.
      Context<T>* context = this->get_mutable_context();
      context->SetTimeAndContinuousState(t0 + h, *xtplus_ie);
      return true;
    } else {
      // NOTE: a space was missing between the concatenated string literals
      // here ("...a stepsize..."); fixed to "...a step size...".
      DRAKE_LOGGER_DEBUG("Implicit trapezoid approach FAILED with a "
                         "step size that succeeded on implicit Euler.");
      return false;
    }
  }
}
// Takes one integration step of size h, producing both the propagated state
// and the error estimate. For steps below the working minimum step size an
// explicit Euler step (with RK2 or two half-sized explicit Euler steps for
// error estimation) is substituted.
// @returns true iff the step succeeded; on failure the context is restored
//          to (t0, xt0).
template <class T>
bool ImplicitEulerIntegrator<T>::DoImplicitIntegratorStep(const T& h) {
  // Save the current time and state.
  Context<T>* context = this->get_mutable_context();
  const T t0 = context->get_time();
  DRAKE_LOGGER_DEBUG("IE DoStep(h={}) t={}", h, t0);

  xt0_ = context->get_continuous_state().CopyToVector();
  xtplus_ie_.resize(xt0_.size());
  xtplus_hie_.resize(xt0_.size());

  // If the most recent step failed only after the second small step, this
  // indicates that the Jacobian was computed from the second small step, and
  // not at (t0, xt0). ImplicitIntegrator<T>::DoStep() would have incorrectly
  // marked the Jacobian as fresh, and so we must correct it by marking
  // jacobian_is_fresh_ as false. (Note that this occurs because we were
  // slightly abusing the jacobian_is_fresh_ flag --- for the second half-sized
  // step, we called MaybeFreshenMatrices() at t = t0 + h/2, meaning that
  // jacobian_is_fresh_ marked whether the Jacobian was last computed at
  // t = t0 + h/2 instead of its usual definition of t = t0. This is why when
  // the step failed, ImplicitIntegrator<T>::DoStep() would have incorrectly
  // marked the Jacobian as fresh.)
  if (failed_jacobian_is_from_second_small_step_) {
    this->set_jacobian_is_fresh(false);

    // Now that we've correctly set the jacobian_is_fresh_ flag, make sure that
    // the failed_jacobian_is_from_second_small_step_ is reset back to false.
    failed_jacobian_is_from_second_small_step_ = false;
  }

  // If the requested h is less than the minimum step size, we'll advance time
  // using an explicit Euler step.
  if (h < this->get_working_minimum_step_size()) {
    DRAKE_LOGGER_DEBUG("-- requested step too small, taking explicit "
                       "step instead");
    // TODO(edrumwri): Investigate replacing this with an explicit trapezoid
    //                 step, which would be expected to give better accuracy.
    //                 The mitigating factor is that h is already small, so a
    //                 test of, e.g., a square wave function, should quantify
    //                 the improvement (if any).

    // The error estimation process for explicit Euler uses an explicit second
    // order Runge-Kutta method so that the order of the asymptotic term
    // matches that used for estimating the error of the implicit Euler
    // integrator.

    // Compute the Euler step.
    xdot_ = this->EvalTimeDerivatives(*context).CopyToVector();
    xtplus_ie_ = xt0_ + h * xdot_;

    // Compute the RK2 or two half-sized Euler steps.
    if (use_implicit_trapezoid_error_estimation_) {
      // Compute the RK2 step
      const int evals_before_rk2 = rk2_->get_num_derivative_evaluations();
      // NOTE: a space was missing between the concatenated string literals
      // here ("...take asingle..."); fixed to "...take a single...".
      if (!rk2_->IntegrateWithSingleFixedStepToTime(t0 + h)) {
        throw std::runtime_error("Embedded RK2 integrator failed to take a "
                                 "single fixed step to the requested time.");
      }
      const int evals_after_rk2 = rk2_->get_num_derivative_evaluations();
      xtplus_hie_ = context->get_continuous_state().CopyToVector();

      // Update the implicit Trapezoid ODE counts.
      itr_statistics_.num_function_evaluations +=
          (evals_after_rk2 - evals_before_rk2);

      // Revert the state to that computed by explicit Euler.
      context->SetTimeAndContinuousState(t0 + h, xtplus_ie_);
    } else {
      // complete two half-sized explicit Euler steps.
      xtplus_hie_ = xt0_ + 0.5 * h * xdot_;
      context->SetTimeAndContinuousState(t0 + 0.5 * h, xtplus_hie_);
      xtplus_hie_ +=
          0.5 * h * this->EvalTimeDerivatives(*context).CopyToVector();

      // Update the half-sized step ODE counts.
      ++(hie_statistics_.num_function_evaluations);

      context->SetTimeAndContinuousState(t0 + h, xtplus_hie_);
    }
  } else {
    // Try taking the requested step.
    bool success = AttemptStepPaired(t0, h, xt0_, &xtplus_ie_, &xtplus_hie_);

    // If the step was not successful, reset the time and state.
    if (!success) {
      context->SetTimeAndContinuousState(t0, xt0_);
      return false;
    }
  }

  // Compute and update the error estimate.
  err_est_vec_ = xtplus_ie_ - xtplus_hie_;

  // Update the caller-accessible error estimate.
  this->get_mutable_error_estimate()->get_mutable_vector().
      SetFromVector(err_est_vec_);

  return true;
}
} // namespace systems
} // namespace drake
// Instantiate ImplicitEulerIntegrator for Drake's default non-symbolic
// scalar types.
DRAKE_DEFINE_CLASS_TEMPLATE_INSTANTIATIONS_ON_DEFAULT_NONSYMBOLIC_SCALARS(
    class drake::systems::ImplicitEulerIntegrator)
| 0 |
/home/johnshepherd/drake/systems | /home/johnshepherd/drake/systems/analysis/runge_kutta2_integrator.cc | #include "drake/systems/analysis/runge_kutta2_integrator.h"
DRAKE_DEFINE_CLASS_TEMPLATE_INSTANTIATIONS_ON_DEFAULT_SCALARS(
class drake::systems::RungeKutta2Integrator)
| 0 |
/home/johnshepherd/drake/systems | /home/johnshepherd/drake/systems/analysis/simulator.cc | #include "drake/systems/analysis/simulator.h"
#include <thread>
#include "drake/common/extract_double.h"
#include "drake/common/text_logging.h"
#include "drake/systems/analysis/runge_kutta3_integrator.h"
#include "drake/systems/analysis/simulator_python_internal.h"
namespace drake {
namespace systems {
// Constructs a Simulator that holds a reference to `system` (not owned);
// a null `context` requests creation of a default context. Delegates to the
// private three-argument constructor.
template <typename T>
Simulator<T>::Simulator(const System<T>& system,
                        std::unique_ptr<Context<T>> context)
    : Simulator(&system, nullptr, std::move(context)) {}
// Constructs a Simulator that takes ownership of `owned_system`; a null
// `context` requests creation of a default context. Delegates to the private
// three-argument constructor.
template <typename T>
Simulator<T>::Simulator(std::unique_ptr<const System<T>> owned_system,
                        std::unique_ptr<Context<T>> context)
    : Simulator(nullptr, std::move(owned_system), std::move(context)) {}
// Delegated-to constructor: exactly one of `system` (borrowed) or
// `owned_system` (owned) is expected to be non-null. Creates a default
// context if none was supplied, installs and initializes the default
// integrator (RK3 configured from SimulatorConfig defaults), and allocates
// the scratch buffers used later during event handling.
template <typename T>
Simulator<T>::Simulator(const System<T>* system,
                        std::unique_ptr<const System<T>> owned_system,
                        std::unique_ptr<Context<T>> context)
    : owned_system_(std::move(owned_system)),
      // Reference the owned system when ownership was transferred; otherwise
      // reference the borrowed one.
      system_(owned_system_ ? *owned_system_ : *system),
      context_(std::move(context)) {
  // TODO(dale.mcconachie) move this default to SimulatorConfig
  constexpr double kDefaultInitialStepSizeTarget = 1e-4;
  // Create a context if necessary.
  if (!context_) context_ = system_.CreateDefaultContext();
  // Create a default integrator and initialize it.
  // The demand guards the assumption that RK3 is the configured default.
  DRAKE_DEMAND(SimulatorConfig{}.integration_scheme == "runge_kutta3");
  integrator_ = std::unique_ptr<IntegratorBase<T>>(
      new RungeKutta3Integrator<T>(system_, context_.get()));
  integrator_->request_initial_step_size_target(
      kDefaultInitialStepSizeTarget);
  integrator_->set_maximum_step_size(SimulatorConfig{}.max_step_size);
  integrator_->set_target_accuracy(SimulatorConfig{}.accuracy);
  integrator_->Initialize();
  // Allocate the necessary temporaries for storing state in update calls
  // (which will then be transferred back to system state).
  discrete_updates_ = system_.AllocateDiscreteVariables();
  unrestricted_updates_ = context_->CloneState();
  // Allocate the vector of active witness functions.
  witness_functions_ = std::make_unique<
      std::vector<const WitnessFunction<T>*>>();
  // Allocate the necessary temporary for witness-based event handling.
  event_handler_xc_ = system_.AllocateTimeDerivatives();
}
// Initializes the Simulator for a simulation run: (re)initializes the
// integrator and statistics, dispatches the System's initialization events
// (unrestricted updates first, then discrete updates, then publish events),
// and pre-collects the per-step and time-triggered events that the first
// AdvanceTo() step will handle. Returns a SimulatorStatus reporting success
// or reached-termination; event-handler failure currently throws (after
// recording the failure in the returned status).
template <typename T>
SimulatorStatus Simulator<T>::Initialize(const InitializeParams& params) {
  // TODO(sherm1) Modify Context to satisfy constraints.
  // TODO(sherm1) Invoke System's initial conditions computation.
  if (!context_)
    throw std::logic_error("Initialize(): Context has not been set.");
  initialization_done_ = false;
  // Record the current time so we can restore it later (see below).
  // *Don't* use a reference here!
  const T current_time = context_->get_time();
  // Assumes success.
  SimulatorStatus initialize_status(ExtractDoubleOrThrow(current_time));
  // Initialize the integrator.
  integrator_->Initialize();
  // Restore default values.
  ResetStatistics();
  // Collect all the initialization events for processing below.
  merged_events_ = system_.AllocateCompositeEventCollection();
  if (!params.suppress_initialization_events) {
    system_.GetInitializationEvents(*context_, merged_events_.get());
  }
  // Event status uses a "first worst" strategy but gives up immediately if a
  // state-modifying event reports failure. However, all publish events are
  // invoked even if one fails, prior to reporting the failure.
  // Do unrestricted updates first.
  EventStatus accumulated_event_status = HandleUnrestrictedUpdate(
      merged_events_->get_unrestricted_update_events());
  if (HasEventFailureOrMaybeThrow(accumulated_event_status,
                                  true /*throw on failure*/,
                                  &initialize_status)) {
    return initialize_status;
  }
  // Do restricted (discrete variable) updates next.
  accumulated_event_status.KeepMoreSevere(
      HandleDiscreteUpdate(merged_events_->get_discrete_update_events()));
  if (HasEventFailureOrMaybeThrow(accumulated_event_status,
                                  true /*throw on failure*/,
                                  &initialize_status)) {
    return initialize_status;
  }
  // TODO(sherm1) "early publish" initialization events should be handled here.
  // Allocate persistent event collection data structures.
  per_step_events_ = system_.AllocateCompositeEventCollection();
  DRAKE_DEMAND(per_step_events_ != nullptr);
  timed_events_ = system_.AllocateCompositeEventCollection();
  DRAKE_DEMAND(timed_events_ != nullptr);
  witnessed_events_ = system_.AllocateCompositeEventCollection();
  DRAKE_DEMAND(witnessed_events_ != nullptr);
  // If any one of the unrestricted or discrete update events (or early publish
  // events) reported "reached termination" then we shouldn't go any further. To
  // see why, compare this with AdvanceTo() where this is the point of a step
  // after which we would advance time (which we should not do if we've already
  // reached termination).
  if (!accumulated_event_status.reached_termination()) {
    // The initial update events did not cause termination, so we need to issue
    // end-of-step publishes and prepare for the next step. We'll handle publish
    // events below (including initialization, forced, time-triggered, and
    // monitor events) and collect time- and step-triggered update events to
    // be issued at the start of the next step.
    // Collect the per-step events.
    system_.GetPerStepEvents(*context_, per_step_events_.get());
    // Collect time-triggered events that trigger now, if any. To ensure that
    // CalcNextUpdateTime() can return the current time we briefly perturb the
    // current time slightly toward negative infinity.
    // TODO(sherm1) This is broken if an exception is raised in
    // CalcNextUpdateTime() since the time will be left perturbed. Likely fix:
    // use a ScopeExit object that restores the time on destruction.
    const T slightly_before_current_time =
        internal::GetPreviousNormalizedValue(current_time);
    context_->PerturbTime(slightly_before_current_time, current_time);
    const T time_of_next_timed_event =
        system_.CalcNextUpdateTime(*context_, timed_events_.get());
    context_->SetTime(current_time);  // Restore the current time.
    // Indicate a timed event is to be handled now, if appropriate.
    time_or_witness_triggered_ =
        (time_of_next_timed_event == current_time ? kTimeTriggered
                                                  : kNothingTriggered);
    // Merge the initialization events with per-step events and current_time
    // time-triggered events. Initialization events will precede any per-step or
    // timed events in the merged collection. Note that per-step and timed
    // discrete/unrestricted update events are *not* processed here; just
    // publish events.
    merged_events_->AddToEnd(*per_step_events_);
    if (time_or_witness_triggered_ & kTimeTriggered)
      merged_events_->AddToEnd(*timed_events_);
    // At this point merged_events_ has any publish events we need to handle
    // now (including initialization, per-step, and time now-triggered).
    // Defer throwing on failure until all publish events are handled.
    accumulated_event_status.KeepMoreSevere(
        HandlePublish(merged_events_->get_publish_events()));
    // If requested, do a force-publish before the simulation starts.
    if (publish_at_initialization_) {
      accumulated_event_status.KeepMoreSevere(
          HandlePublish(system_.get_forced_publish_events()));
    }
    // Invoke the monitor() if there is one. This is logically like a
    // Diagram-level Publish event so we handle it similarly.
    if (get_monitor())
      accumulated_event_status.KeepMoreSevere(get_monitor()(*context_));
    if (HasEventFailureOrMaybeThrow(accumulated_event_status,
                                    true /*throw on failure*/,
                                    &initialize_status)) {
      return initialize_status;
    }
  }
  // This is expected to be used for interrupting a long-running simulation
  // but we'll call it here for consistency with AdvanceTo() in case it gains
  // other uses.
  if (python_monitor_ != nullptr) python_monitor_();
  // If we get here, none of the event handlers reported failure, but we may
  // have reached early termination.
  if (accumulated_event_status.severity() == EventStatus::kReachedTermination) {
    initialize_status.SetReachedTermination(
        ExtractDoubleOrThrow(context_->get_time()),
        accumulated_event_status.system(), accumulated_event_status.message());
  }
  // This is a successful initialization, though possibly returning "reached
  // termination".
  last_known_simtime_ = ExtractDoubleOrThrow(context_->get_time());
  initialization_done_ = true;
  return initialize_status;
}
// Dispatches the unrestricted-update events in `events`. The new state is
// staged in a scratch buffer and committed to the context only when the
// handlers actually produced an update and none failed. Returns the
// accumulated handler status.
template <typename T>
EventStatus Simulator<T>::HandleUnrestrictedUpdate(
    const EventCollection<UnrestrictedUpdateEvent<T>>& events) {
  if (!events.HasEvents()) return EventStatus::DidNothing();
  // Stage the updated state in the preallocated scratch buffer.
  const EventStatus status = system_.CalcUnrestrictedUpdate(
      *context_, events, unrestricted_updates_.get());
  const bool should_commit = !(status.did_nothing() || status.failed());
  if (should_commit) {
    // Commit the staged state to the context.
    system_.ApplyUnrestrictedUpdate(events, unrestricted_updates_.get(),
                                    context_.get());
    ++num_unrestricted_updates_;
    // An unrestricted update may change which witness functions are active,
    // so force a redetermination before the next continuous step.
    redetermine_active_witnesses_ = true;
  }
  return status;
}
// Dispatches the discrete-update events in `events`. The new discrete values
// are staged in a scratch buffer and committed to the context only when the
// handlers actually produced an update and none failed. Returns the
// accumulated handler status.
template <typename T>
EventStatus Simulator<T>::HandleDiscreteUpdate(
    const EventCollection<DiscreteUpdateEvent<T>>& events) {
  if (!events.HasEvents()) return EventStatus::DidNothing();
  // Stage the updated discrete variables in the preallocated scratch buffer.
  const EventStatus status = system_.CalcDiscreteVariableUpdate(
      *context_, events, discrete_updates_.get());
  const bool should_commit = !(status.did_nothing() || status.failed());
  if (should_commit) {
    // Commit the staged values to the context.
    system_.ApplyDiscreteVariableUpdate(events, discrete_updates_.get(),
                                        context_.get());
    ++num_discrete_updates_;
  }
  return status;
}
// Dispatches the publish events in `events` and returns the accumulated
// handler status. The publish counter is advanced only when the handlers
// both did something and succeeded.
template <typename T>
EventStatus Simulator<T>::HandlePublish(
    const EventCollection<PublishEvent<T>>& events) {
  if (!events.HasEvents()) return EventStatus::DidNothing();
  const EventStatus status = system_.Publish(*context_, events);
  if (!status.did_nothing() && !status.failed()) ++num_publishes_;
  return status;
}
// Reports whether `event_status` represents a handler failure. On failure the
// failure is recorded in `simulator_status` and, when `throw_on_failure` is
// set, a std::runtime_error carrying the formatted message is thrown.
// Returns true iff a failure occurred (when not throwing).
template <typename T>
bool Simulator<T>::HasEventFailureOrMaybeThrow(
    const EventStatus& event_status, bool throw_on_failure,
    SimulatorStatus* simulator_status) {
  if (event_status.failed()) {
    // Record the failing system and message at the current sim time.
    simulator_status->SetEventHandlerFailed(
        ExtractDoubleOrThrow(context_->get_time()), event_status.system(),
        event_status.message());
    if (throw_on_failure) {
      throw std::runtime_error(simulator_status->FormatMessage());
    }
    return true;  // Failed, propagate error status upward.
  }
  return false;
}
// Advances the simulation to `boundary_time`, taking as many steps as
// needed and dispatching unrestricted/discrete updates, continuous
// integration, publishes, and witness-triggered events along the way.
// Initializes first if the caller has not done so. Throws if simulation time
// was externally modified since the last Initialize()/AdvanceTo(), if
// `boundary_time` is in the past, or if an event handler fails.
template <typename T>
SimulatorStatus Simulator<T>::AdvanceTo(const T& boundary_time) {
  if (!initialization_done_) {
    const SimulatorStatus initialize_status = Initialize();
    if (!initialize_status.succeeded())
      return initialize_status;
  }
  DRAKE_DEMAND(!std::isnan(last_known_simtime_));
  // Guard against the user having moved the context's clock behind our back.
  if (last_known_simtime_ != context_->get_time()) {
    throw std::logic_error(
        "Simulation time has changed since last Initialize() or AdvanceTo()."
        " Resetting simulation time requires a call to Initialize().");
  }
  DRAKE_THROW_UNLESS(boundary_time >= context_->get_time());
  // Assume success.
  SimulatorStatus simulator_status(ExtractDoubleOrThrow(boundary_time));
  DRAKE_DEMAND(timed_events_ != nullptr);
  DRAKE_DEMAND(witnessed_events_ != nullptr);
  DRAKE_DEMAND(merged_events_ != nullptr);
  // Clear events for the loop iteration.
  merged_events_->Clear();
  merged_events_->AddToEnd(*per_step_events_);
  // Merge in timed and witnessed events, if necessary.
  if (time_or_witness_triggered_ & kTimeTriggered)
    merged_events_->AddToEnd(*timed_events_);
  if (time_or_witness_triggered_ & kWitnessTriggered)
    merged_events_->AddToEnd(*witnessed_events_);
  // Take steps until desired interval has completed.
  while (true) {
    // Starting a new step on the trajectory.
    const T step_start_time = context_->get_time();
    DRAKE_LOGGER_TRACE("Starting a simulation step at {}", step_start_time);
    // Delay to match target realtime rate if requested and possible.
    PauseIfTooFast();
    // The general policy here is to do actions in decreasing order of
    // "violence" to the state, i.e. unrestricted -> discrete -> continuous ->
    // publish. The "timed" actions happen before the "per step" ones.
    // Event status is accumulated in a "first worst" manner -- we keep going
    // until something fails. Failure from state-updating events (discrete or
    // unrestricted) halts processing immediately. Failure from a publish event
    // still allows the remaining publish events to be handled prior to
    // reporting the failure.
    // Do unrestricted updates first.
    EventStatus accumulated_event_status = HandleUnrestrictedUpdate(
        merged_events_->get_unrestricted_update_events());
    if (HasEventFailureOrMaybeThrow(accumulated_event_status,
                                    true /*throw on failure*/,
                                    &simulator_status)) {
      return simulator_status;
    }
    // Do restricted (discrete variable) updates next.
    accumulated_event_status.KeepMoreSevere(
        HandleDiscreteUpdate(merged_events_->get_discrete_update_events()));
    if (HasEventFailureOrMaybeThrow(accumulated_event_status,
                                    true /*throw on failure*/,
                                    &simulator_status)) {
      return simulator_status;
    }
    // TODO(sherm1) "early publish" events should be handled here. Also
    // update the Step() documentation at the top of simulator.h.
    // If any one of the unrestricted or discrete update events (or early
    // publish events) reported "reached termination" (at start time tₛ), then
    // we're done with this step (and the simulation) and should not advance
    // time further. This doesn't increment num_steps_taken because we really
    // just finished off the previous step (like AdvancePendingEvents()).
    if (!accumulated_event_status.reached_termination()) {
      // The initial update events did not cause termination, so we need to
      // advance time, issue end-of-step publishes and prepare for the next
      // step.
      // How far can we advance time before we have to handle timed events? This
      // can return infinity, meaning we don't see any timed events coming. When
      // an earlier event trigger time is returned, at least one Event object
      // must be returned. Note that if the returned time is the current time,
      // we handle the Events and then restart at the same time, possibly
      // discovering more events.
      const T time_of_next_timed_event =
          system_.CalcNextUpdateTime(*context_, timed_events_.get());
      DRAKE_DEMAND(time_of_next_timed_event >= step_start_time);
      using std::isfinite;
      DRAKE_DEMAND(!isfinite(time_of_next_timed_event) ||
          timed_events_->HasEvents());
      // Determine whether the set of events requested by the System at
      // time_of_next_timed_event includes an Update action, a Publish action,
      // or both.
      T next_update_time = std::numeric_limits<double>::infinity();
      T next_publish_time = std::numeric_limits<double>::infinity();
      if (timed_events_->HasDiscreteUpdateEvents() ||
          timed_events_->HasUnrestrictedUpdateEvents()) {
        next_update_time = time_of_next_timed_event;
      }
      if (timed_events_->HasPublishEvents()) {
        next_publish_time = time_of_next_timed_event;
      }
      // Integrate the continuous state (if any) forward in time. Note that if
      // time_of_next_timed_event is the current time, this will return
      // immediately without time having advanced. That still counts as a step.
      time_or_witness_triggered_ = IntegrateContinuousState(
          next_publish_time,
          next_update_time,
          boundary_time,
          witnessed_events_.get());
      // Update the number of simulation steps taken.
      ++num_steps_taken_;
      // TODO(sherm1) Constraint projection goes here.
      // Clear events for the next loop iteration.
      merged_events_->Clear();
      // Merge in per-step events.
      merged_events_->AddToEnd(*per_step_events_);
      // Only merge timed / witnessed events in if an event was triggered.
      if (time_or_witness_triggered_ & kTimeTriggered)
        merged_events_->AddToEnd(*timed_events_);
      if (time_or_witness_triggered_ & kWitnessTriggered)
        merged_events_->AddToEnd(*witnessed_events_);
      // At this point merged_events_ has any publish events we need to handle
      // now (including per-step, witnessed, and time-triggered). These are
      // simultaneous and all will be handled even if there is a failure.
      accumulated_event_status.KeepMoreSevere(
          HandlePublish(merged_events_->get_publish_events()));
      if (get_publish_every_time_step()) {
        accumulated_event_status.KeepMoreSevere(
            HandlePublish(system_.get_forced_publish_events()));
      }
      // Invoke the monitor() if there is one. This is logically like a
      // Diagram-level Publish event so we handle it similarly.
      if (get_monitor())
        accumulated_event_status.KeepMoreSevere(get_monitor()(*context_));
      // If any of the publish event handlers failed, stop now.
      if (HasEventFailureOrMaybeThrow(accumulated_event_status,
                                      true /*throw on failure*/,
                                      &simulator_status)) {
        return simulator_status;
      }
    }
    // Allow for interrupt in Python.
    if (python_monitor_ != nullptr) python_monitor_();
    // If we get here, none of the event handlers reported failure, but we may
    // have reached early termination.
    if (accumulated_event_status.reached_termination()) {
      simulator_status.SetReachedTermination(
          ExtractDoubleOrThrow(context_->get_time()),
          accumulated_event_status.system(),
          accumulated_event_status.message());
    }
    // Break out of the loop if we've reached a termination condition. Except
    // in case of failure above, at termination we will have already handled
    // publish events and merged other per-step, timed, and witnessed events
    // into the event collection. Those "straggler" events can be handled at the
    // end of a simulation with AdvancePendingEvents().
    if (!simulator_status.succeeded() || context_->get_time() >= boundary_time)
      break;
  }
  // TODO(sherm1) Provide an option to return status rather than throw on
  // failure.
  // We reach here after hitting a termination condition or a failure when
  // "throw on failure" is disabled (not supported yet). Note that if we
  // exit by throwing the following "clean up" steps will not have been
  // performed.
  // TODO(edrumwri): Add test coverage to complete #8490.
  redetermine_active_witnesses_ = true;
  // Record the time to detect unexpected jumps.
  last_known_simtime_ = ExtractDoubleOrThrow(context_->get_time());
  return simulator_status;
}
// Computes the length of the witness-isolation window for the current
// integrator configuration, or an empty optional when isolation is not to be
// performed (fixed-step mode with no accuracy setting). Throws when the
// integrator is error-controlled but no accuracy has been set in the context.
template <class T>
std::optional<T> Simulator<T>::GetCurrentWitnessTimeIsolation() const {
  using std::max;
  // TODO(edrumwri): Add ability to disable witness time isolation through
  // a Simulator setting.
  // Scale factor applied to the accuracy setting so that witness isolation
  // accuracy tracks integrator accuracy for most systems; values below one
  // tighten the isolation, values above one loosen it.
  const double iso_scale_factor = 0.01;
  // TODO(edrumwri): Acquire characteristic time properly from the system
  // (i.e., modify the System to provide this value).
  const double characteristic_time = 1.0;
  // The accuracy setting drives the window size in both modes.
  const std::optional<double>& accuracy = get_context().get_accuracy();
  if (integrator_->get_fixed_step_mode()) {
    // Fixed-step mode: isolation is only possible when accuracy is set.
    if (!accuracy) return std::optional<T>();
    return max(integrator_->get_working_minimum_step_size(),
               T(iso_scale_factor * accuracy.value() *
                 integrator_->get_maximum_step_size()));
  }
  // Error-controlled integration requires an accuracy setting.
  if (!accuracy) {
    throw std::logic_error("Integrator is not operating in fixed step mode "
                           "and accuracy is not set in the context.");
  }
  // The max() keeps the window no smaller than what the current time in the
  // context can resolve.
  return max(integrator_->get_working_minimum_step_size(),
             iso_scale_factor * accuracy.value() * characteristic_time);
}
// Determines whether any witnesses trigger over the interval [t0, tw],
// where tw - t0 < ε and ε is the "witness isolation length". If one or more
// witnesses does trigger over this interval, the time (and corresponding state)
// will be advanced to tw and those witnesses will be stored in
// `triggered_witnesses` on return. On the other hand (i.e., if no witnesses)
// trigger over [t0, t0 + ε], time (and corresponding state) will be advanced
// to some tc in the open interval (t0, tf) such that no witnesses trigger
// over [t0, tc]; in other words, we deem it "safe" to integrate to tc.
// @param[in,out] triggered_witnesses on entry, the set of witness functions
//                that triggered over [t0, tf]; on exit, the set of witness
//                functions that triggered over [t0, tw], where tw is some time
//                such that tw - t0 < ε. If no functions trigger over
//                [t0, t0 + ε], `triggered_witnesses` will be empty on exit.
// @pre The time and state are at tf and x(tf), respectively, and at least
//      one witness function has triggered over [t0, tf].
// @post If `triggered_witnesses` is empty, the time and state will be
//       set to some tc and x(tc), respectively, such that no witnesses trigger
//       over [t0, tc]. Otherwise, the time and state will be set to tw and
//       x(tw), respectively.
// @note The underlying assumption is that a witness function triggers over a
//       interval [a, d] for d ≤ the maximum integrator step size if that
//       witness also triggers over interval [a, b] for some b < d. Per
//       WitnessFunction documentation, we assume that a witness function
//       crosses zero at most once over an interval of size [t0, tf]).
template <class T>
void Simulator<T>::IsolateWitnessTriggers(
    const std::vector<const WitnessFunction<T>*>& witnesses,
    const VectorX<T>& w0,
    const T& t0, const VectorX<T>& x0, const T& tf,
    std::vector<const WitnessFunction<T>*>* triggered_witnesses) {
  // Verify that the vector of triggered witnesses is non-null.
  DRAKE_DEMAND(triggered_witnesses != nullptr);
  // TODO(edrumwri): Speed this process using interpolation between states,
  // more powerful root finding methods, and/or introducing the concept of
  // a dead band.
  // Will need to alter the context repeatedly.
  Context<T>& context = get_mutable_context();
  // Get the witness isolation interval length.
  const std::optional<T> witness_iso_len = GetCurrentWitnessTimeIsolation();
  // Check whether witness functions *are* to be isolated. If not, the witnesses
  // that were triggered on entry will be the set that is returned.
  if (!witness_iso_len)
    return;
  // Mini function for integrating the system forward in time from t0.
  // Each call restarts from (t0, x0) and integrates up to t_des, possibly in
  // several integrator substeps.
  std::function<void(const T&)> integrate_forward =
      [&t0, &x0, &context, this](const T& t_des) {
        const T inf = std::numeric_limits<double>::infinity();
        context.SetTime(t0);
        context.SetContinuousState(x0);
        while (context.get_time() < t_des)
          integrator_->IntegrateNoFurtherThanTime(inf, inf, t_des);
      };
  // Starting from c = (t0 + tf)/2, look for a witness function triggering
  // over the interval [t0, tc]. Assuming a witness does trigger, c will
  // continue moving leftward as a witness function triggers until the length of
  // the time interval is small. If a witness fails to trigger as c moves
  // leftward, we return, indicating that no witnesses triggered over [t0, c].
  DRAKE_LOGGER_DEBUG(
      "Isolating witness functions using isolation window of {} over [{}, {}]",
      witness_iso_len.value(), t0, tf);
  VectorX<T> wc(witnesses.size());
  // Bisection over [a, b]: a stays pinned at t0 (the sign test is always
  // against w0) while b shrinks toward the first trigger time.
  T a = t0;
  T b = tf;
  do {
    // Compute the midpoint and evaluate the witness functions at it.
    T c = (a + b) / 2;
    DRAKE_LOGGER_DEBUG("Integrating forward to time {}", c);
    integrate_forward(c);
    // See whether any witness functions trigger.
    bool trigger = false;
    for (size_t i = 0; i < witnesses.size(); ++i) {
      wc[i] = get_system().CalcWitnessValue(context, *witnesses[i]);
      if (witnesses[i]->should_trigger(w0[i], wc[i]))
        trigger = true;
    }
    // If no witness function triggered, we can continue integrating forward.
    if (!trigger) {
      // NOTE: Since we're always checking that the sign changes over [t0,c],
      // it's also feasible to replace the two lines below with "a = c" without
      // violating Simulator's contract to only integrate once over the interval
      // [a, c], for some c <= b before per-step events are handled (i.e., it's
      // unacceptable to take two steps of (c - a)/2 without processing per-step
      // events first). That change would avoid handling unnecessary per-step
      // events- we know no other events are to be handled between t0 and tf-
      // but the current logic appears easier to follow.
      DRAKE_LOGGER_DEBUG("No witness functions triggered up to {}", c);
      triggered_witnesses->clear();
      return;  // Time is c.
    } else {
      b = c;
    }
  } while (b - a > witness_iso_len.value());
  // Determine the set of triggered witnesses using the most recent midpoint
  // evaluations (wc); time and state are currently at that midpoint.
  triggered_witnesses->clear();
  for (size_t i = 0; i < witnesses.size(); ++i) {
    if (witnesses[i]->should_trigger(w0[i], wc[i]))
      triggered_witnesses->push_back(witnesses[i]);
  }
}
// Evaluates every witness function in `witness_functions` at `context` and
// returns the values stacked in a vector, in the same order as the input.
template <class T>
VectorX<T> Simulator<T>::EvaluateWitnessFunctions(
    const std::vector<const WitnessFunction<T>*>& witness_functions,
    const Context<T>& context) const {
  const System<T>& system = get_system();
  VectorX<T> values(witness_functions.size());
  int next_index = 0;
  for (const WitnessFunction<T>* witness : witness_functions) {
    values[next_index++] = system.CalcWitnessValue(context, *witness);
  }
  return values;
}
// Determines whether at least one of a collection of witness functions
// triggered over a time interval [t0, tf], given the function values at the
// two interval endpoints.
// @param witness_functions a vector of all witness functions active over
//        [t0, tf].
// @param w0 the values of the witnesses evaluated at t0.
// @param wf the values of the witnesses evaluated at tf.
// @param [out] triggered_witnesses cleared on entry; on a `true` return it
//        holds the first witness (in input order) found to have triggered.
// @returns `true` if a witness triggered or `false` otherwise.
template <class T>
bool Simulator<T>::DidWitnessTrigger(
    const std::vector<const WitnessFunction<T>*>& witness_functions,
    const VectorX<T>& w0,
    const VectorX<T>& wf,
    std::vector<const WitnessFunction<T>*>* triggered_witnesses) {
  triggered_witnesses->clear();
  for (size_t i = 0; i < witness_functions.size(); ++i) {
    if (witness_functions[i]->should_trigger(w0[i], wf[i])) {
      // Record only the first triggered witness and stop scanning.
      triggered_witnesses->push_back(witness_functions[i]);
      return true;
    }
  }
  return false;
}
// Populates event data for `event` triggered by a witness function (`witness`)
// that was evaluated over the time interval [`t0`, `tf`] and adds it to the
// given event collection (`events`).
template <class T>
void Simulator<T>::PopulateEventDataForTriggeredWitness(
    const T& t0, const T& tf, const WitnessFunction<T>* witness,
    Event<T>* event, CompositeEventCollection<T>* events) const {
  // Populate the event data.
  WitnessTriggeredEventData<T>* event_data =
      event->template get_mutable_event_data<WitnessTriggeredEventData<T>>();
  DRAKE_DEMAND(event_data != nullptr);
  event_data->set_triggered_witness(witness);
  event_data->set_t0(t0);
  event_data->set_tf(tf);
  // event_handler_xc_ holds the continuous state at t0 (stashed by
  // IntegrateContinuousState() before this call); the context currently holds
  // the state at the trigger time.
  event_data->set_xc0(event_handler_xc_.get());
  event_data->set_xcf(&context_->get_continuous_state());
  // Hand the fully-populated event to the System for insertion into `events`.
  get_system().AddTriggeredWitnessFunctionToCompositeEventCollection(
      event, events);
}
// Rebuilds the cached list of active witness functions, but only when a prior
// operation (e.g. an unrestricted update) has marked the list as stale.
template <class T>
void Simulator<T>::RedetermineActiveWitnessFunctionsIfNecessary() {
  if (!redetermine_active_witnesses_) return;
  witness_functions_->clear();
  get_system().GetWitnessFunctions(get_context(), witness_functions_.get());
  redetermine_active_witnesses_ = false;
}
// Integrates the continuous state forward in time while also locating
// the first zero of any triggered witness functions. Any of these times may
// be set to infinity to indicate that nothing is scheduled.
//
// @param next_publish_time the time at which the next publish event occurs.
// @param next_update_time the time at which the next update event occurs.
// @param boundary_time the maximum time to advance to.
// @param witnessed_events a non-null collection of events, which the method
//        will clear on entry.
// @returns the kind of event triggers that terminated integration.
template <class T>
typename Simulator<T>::TimeOrWitnessTriggered
Simulator<T>::IntegrateContinuousState(
    const T& next_publish_time, const T& next_update_time,
    const T& boundary_time, CompositeEventCollection<T>* witnessed_events) {
  using std::abs;
  // Clear the composite event collection.
  DRAKE_ASSERT(witnessed_events != nullptr);
  witnessed_events->Clear();
  // Save the time and current state so witness isolation can restart from
  // this point as many times as needed.
  const Context<T>& context = get_context();
  const T t0 = context.get_time();
  const VectorX<T> x0 = context.get_continuous_state().CopyToVector();
  // Get the set of witness functions active at the current state.
  RedetermineActiveWitnessFunctionsIfNecessary();
  const auto& witness_functions = *witness_functions_;
  // Evaluate the witness functions.
  w0_ = EvaluateWitnessFunctions(witness_functions, context);
  // Attempt to integrate. Updates and boundary times are consciously
  // distinguished between. See internal documentation for
  // IntegratorBase::IntegrateNoFurtherThanTime() for more information.
  typename IntegratorBase<T>::StepResult result =
      integrator_->IntegrateNoFurtherThanTime(
          next_publish_time, next_update_time, boundary_time);
  const T tf = context.get_time();
  // Evaluate the witness functions again.
  wf_ = EvaluateWitnessFunctions(witness_functions, context);
  // Triggering requires isolating the witness function time.
  if (DidWitnessTrigger(witness_functions, w0_, wf_, &triggered_witnesses_)) {
    // Isolate the time that the witness function triggered. If witness triggers
    // are detected in the interval [t0, tf], any additional time-triggered
    // events are only relevant iff at least one witness function is
    // successfully isolated (see IsolateWitnessTriggers() for details).
    IsolateWitnessTriggers(
        witness_functions, w0_, t0, x0, tf, &triggered_witnesses_);
    // Store the state at x0 in the temporary continuous state. We only do this
    // if there are triggered witnesses (even though `witness_triggered` is
    // `true`, the witness might not have actually triggered after isolation).
    if (!triggered_witnesses_.empty())
      event_handler_xc_->SetFromVector(x0);
    // Store witness function(s) that triggered.
    for (const WitnessFunction<T>* fn : triggered_witnesses_) {
      DRAKE_LOGGER_DEBUG("Witness function {} crossed zero at time {}",
                         fn->description(), context.get_time());
      // Skip witness functions that have no associated event (i.e., skip
      // witness functions whose sole purpose is to insert a break in the
      // integration of continuous state).
      if (!fn->get_event())
        continue;
      // Get the event object that corresponds to this witness function. If
      // Simulator has yet to create this object, go ahead and create it.
      auto& event = witness_function_events_[fn];
      if (!event) {
        event = fn->get_event()->Clone();
        event->set_trigger_type(TriggerType::kWitness);
        event->set_event_data(WitnessTriggeredEventData<T>());
      }
      PopulateEventDataForTriggeredWitness(t0, tf, fn, event.get(),
                                           witnessed_events);
    }
    // When successful, the isolation process produces a vector of witnesses
    // that trigger over every interval [t0, ti], ∀ti in (t0, tf]. If this
    // vector (triggered_witnesses_) is empty, then time advanced to the first
    // ti such that no witnesses triggered over [t0, ti].
    const T& ti = context_->get_time();
    if (!triggered_witnesses_.empty()) {
      // We now know that integration terminated at a witness function crossing.
      // Now we need to look for the unusual case in which a timed event should
      // also trigger simultaneously.
      // IntegratorBase::IntegrateNoFurtherThanTime(.) pledges to step no
      // further than min(next_publish_time, next_update_time, boundary_time),
      // so we'll verify that assertion.
      // NOTE(review): this check mixes ti (against the update time) and tf
      // (against the publish time). Since ti <= tf, the tf-side check is the
      // stronger form; presumably both operands were meant to bound the time
      // integration reached -- confirm the intended variable.
      DRAKE_DEMAND(ti <= next_update_time && tf <= next_publish_time);
      if (ti == next_update_time || ti == next_publish_time) {
        return kBothTriggered;
      } else {
        return kWitnessTriggered;
      }
    } else {
      // Integration didn't succeed on the larger interval [t0, tf]; instead,
      // the continuous state was integrated to the intermediate time ti, where
      // t0 < ti < tf. Since any publishes/updates must occur at tf, there
      // should be no triggers.
      DRAKE_DEMAND(t0 < ti && ti < tf);
      // The contract for IntegratorBase::IntegrateNoFurtherThanTime() specifies
      // that tf must be less than or equal to next_update_time and
      // next_publish_time. Since ti must be strictly less than tf, it follows
      // that ti must be strictly less than next_update_time and
      // next_publish_time.
      DRAKE_DEMAND(next_update_time > ti && next_publish_time > ti);
      return kNothingTriggered;
    }
  }
  // No witness function triggered; handle integration as usual.
  // Updates and boundary times are consciously distinguished between. See
  // internal documentation for IntegratorBase::IntegrateNoFurtherThanTime() for
  // more information.
  switch (result) {
    case IntegratorBase<T>::kReachedUpdateTime:
    case IntegratorBase<T>::kReachedPublishTime:
      return kTimeTriggered;
    // We do nothing for these two cases.
    case IntegratorBase<T>::kTimeHasAdvanced:
    case IntegratorBase<T>::kReachedBoundaryTime:
      return kNothingTriggered;
    case IntegratorBase<T>::kReachedZeroCrossing:
    case IntegratorBase<T>::kReachedStepLimit:
      throw std::logic_error("Unexpected integrator result");
  }
  DRAKE_UNREACHABLE();
}
template <typename T>
void Simulator<T>::PauseIfTooFast() const {
  // A nonpositive target rate means "run at full speed" -- nothing to do.
  if (target_realtime_rate_ <= 0) return;
  const double sim_now = ExtractDoubleOrThrow(get_context().get_time());
  const double sim_elapsed = sim_now - initial_simtime_;
  // The wall-clock instant at which the simulation is "allowed" to have
  // advanced this far, given the requested realtime rate.
  const TimePoint realtime_target =
      initial_realtime_ + Duration(sim_elapsed / target_realtime_rate_);
  // TODO(sherm1): Could add some slop to now() and not sleep if we are
  // already close enough. But what is a reasonable value?
  if (realtime_target > Clock::now()) {
    std::this_thread::sleep_until(realtime_target);
  }
}
template <typename T>
double Simulator<T>::get_actual_realtime_rate() const {
  // Simulated time elapsed since the last Initialize()/ResetStatistics().
  const double sim_elapsed =
      ExtractDoubleOrThrow(get_context().get_time()) - initial_simtime_;
  // Wall-clock time elapsed over the same interval.
  const Duration wall_elapsed = Clock::now() - initial_realtime_;
  return sim_elapsed / wall_elapsed.count();
}
template <typename T>
void Simulator<T>::ResetStatistics() {
  // Clear the integrator's statistics along with our own step/update counts.
  integrator_->ResetStatistics();
  num_steps_taken_ = 0;
  num_publishes_ = 0;
  num_discrete_updates_ = 0;
  num_unrestricted_updates_ = 0;
  // Restart the realtime-rate baseline at the current sim time & wall clock.
  initial_simtime_ = ExtractDoubleOrThrow(get_context().get_time());
  initial_realtime_ = Clock::now();
}
namespace internal {

// Stores `monitor` in the simulator's python_monitor_ member, overwriting any
// previously-installed pointer (nullptr is accepted, which clears it). The
// simulator pointer itself must be non-null.
template <typename T>
void SimulatorPythonInternal<T>::set_python_monitor(
    Simulator<T>* simulator, void (*monitor)()) {
  DRAKE_DEMAND(simulator != nullptr);
  simulator->python_monitor_ = monitor;
}

}  // namespace internal
} // namespace systems
} // namespace drake
// Explicit template instantiations for Drake's default nonsymbolic scalar
// types, so the definitions above are emitted into this translation unit.
DRAKE_DEFINE_CLASS_TEMPLATE_INSTANTIATIONS_ON_DEFAULT_NONSYMBOLIC_SCALARS(
    class drake::systems::Simulator)
DRAKE_DEFINE_CLASS_TEMPLATE_INSTANTIATIONS_ON_DEFAULT_NONSYMBOLIC_SCALARS(
    class drake::systems::internal::SimulatorPythonInternal)
| 0 |
/home/johnshepherd/drake/systems | /home/johnshepherd/drake/systems/analysis/region_of_attraction.h | #pragma once
#include <optional>
#include "drake/common/name_value.h"
#include "drake/common/symbolic/expression.h"
#include "drake/solvers/solver_id.h"
#include "drake/solvers/solver_options.h"
#include "drake/systems/framework/context.h"
#include "drake/systems/framework/system.h"
namespace drake {
namespace systems {
namespace analysis {
/**
* Consolidates the many possible options to be passed to the region of
* attraction algorithm.
*/
struct RegionOfAttractionOptions {
  RegionOfAttractionOptions() = default;

  /** Passes this object to an Archive.
  Refer to @ref yaml_serialization "YAML Serialization" for background. */
  template <typename Archive>
  void Serialize(Archive* a) {
    // Each Visit() exposes one data member to the archive under its field
    // name; the list below mirrors the member declarations that follow.
    a->Visit(DRAKE_NVP(lyapunov_candidate));
    a->Visit(DRAKE_NVP(state_variables));
    a->Visit(DRAKE_NVP(use_implicit_dynamics));
    a->Visit(DRAKE_NVP(solver_id));
    a->Visit(DRAKE_NVP(solver_options));
  }

  /** A candidate Lyapunov function using the symbolic Variables named
   * x0, x1, ..., where the order matches the continuous state vector of the
   * system being evaluated (or the vector state_variables).
   */
  symbolic::Expression lyapunov_candidate{};

  /** If non-empty, a list of Variable that associates the variable name with
   * the elements of the System's continuous state vector. Must be empty
   * or have size equal to the number of continuous state variables in the
   * system.
   */
  VectorX<symbolic::Variable> state_variables{};

  /** If true, the system dynamics will be evaluated using
   * CalcImplicitTimeDerivativesResidual instead of CalcTimeDerivatives to
   * obtain g(x,ẋ) = 0 (instead of ẋ = f(x)). The Lyapunov conditions will
   * also be evaluated in the implicit form. This is more expensive than
   * analysis in the explicit form, as it requires more indeterminates, but it
   * enables analysis of systems with rational polynomial dynamics.
   *
   * See https://underactuated.csail.mit.edu/lyapunov.html#ex:implicit for more
   * details.
   */
  bool use_implicit_dynamics{false};

  /** If not std::nullopt, then we will solve the optimization problem using the
   * specified solver; otherwise Drake will choose a solver.
   */
  std::optional<solvers::SolverId> solver_id{std::nullopt};

  /** The solver options used in the optimization problem. */
  std::optional<solvers::SolverOptions> solver_options{std::nullopt};
};
/**
* Estimates the region of attraction of the time-invariant @p system at the
* fixed point defined by @p context.
*
* This implementation only searches for the largest level set of the
* `lyapunov_candidate` function from @p options (or a candidate obtained
* from solving the Lyapunov equation on the linearization).
*
* @param system a time-invariant continuous-time System that supports
* scalar-type conversion to symbolic::Expression. The dynamics of the
* system must be polynomial.
*
* @param context a Context that defines the parameters of the system and
* the fixed-point about which we are analyzing the regional stability.
*
* @param options provides a variety of configuration options. @see
* RegionOfAttractionOptions.
*
* @returns a symbolic::Expression representing a Lyapunov function using
* the symbolic Variables named x0, x1..., where the order matches the
* continuous state vector in the @p context, or the vector state_variables
* passed in through the options structure (if it is non-empty). The level set
* {x | V(x)<=1} containing the fixed-point in @p context represents the region
* of attraction.
*
* @pre For the given @p system and @p context, any required input ports on @p
* system must be "defined", i.e., connected to other systems in a larger
* diagram or holding fixed values; see System::FixInputPortsFrom for possible
* caveats. Analyzing a closed-loop system would typically be accomplished by
* having both the plant and the controller in a diagram (which then has no
* input ports), and passing the diagram into this method as @p system.
*
* Note: There are more numerical recipes for region of attraction analysis that
* could extend the current implementation. Do report an issue if you discover a
* system for which this code does not perform well.
*
* @ingroup analysis
*/
// Declaration only; the function is defined out-of-line. See the Doxygen
// comment above for the full contract.
symbolic::Expression RegionOfAttraction(
    const System<double>& system, const Context<double>& context,
    const RegionOfAttractionOptions& options = RegionOfAttractionOptions());
} // namespace analysis
} // namespace systems
} // namespace drake
| 0 |
/home/johnshepherd/drake/systems | /home/johnshepherd/drake/systems/analysis/simulator.h | #pragma once
#include <algorithm>
#include <chrono>
#include <limits>
#include <memory>
#include <optional>
#include <unordered_map>
#include <utility>
#include <vector>
#include "drake/common/default_scalars.h"
#include "drake/common/drake_assert.h"
#include "drake/common/drake_copyable.h"
#include "drake/common/extract_double.h"
#include "drake/common/name_value.h"
#include "drake/systems/analysis/integrator_base.h"
#include "drake/systems/analysis/simulator_config.h"
#include "drake/systems/analysis/simulator_status.h"
#include "drake/systems/framework/context.h"
#include "drake/systems/framework/system.h"
#include "drake/systems/framework/witness_function.h"
namespace drake {
namespace systems {
#ifndef DRAKE_DOXYGEN_CXX
namespace internal {
template <typename T>
class SimulatorPythonInternal;
} // namespace internal
#endif
/// @ingroup simulation
/// Parameters for fine control of simulator initialization.
/// @see Simulator<T>::Initialize().
struct InitializeParams {
  /** Passes this object to an Archive.
  Refer to @ref yaml_serialization "YAML Serialization" for background. */
  template <typename Archive>
  void Serialize(Archive* a) {
    // Expose the single data member to the archive under its field name.
    a->Visit(DRAKE_NVP(suppress_initialization_events));
  }

  /// Whether to trigger initialization events. Events are triggered by
  /// default; it may be useful to suppress them when reusing a simulator.
  bool suppress_initialization_events{false};
};
/** @ingroup simulation
A class for advancing the state of hybrid dynamic systems, represented by
`System<T>` objects, forward in time. Starting with an initial Context for a
given System, %Simulator advances time and produces a series of Context
values that forms a trajectory satisfying the system's dynamic equations to
a specified accuracy. Only the Context is modified by a %Simulator; the
System is const.
A Drake System is a continuous/discrete/hybrid dynamic system where the
continuous part is a DAE, that is, it is expected to consist of a set of
differential equations and bilateral algebraic constraints. The set of
active constraints may change as a result of particular events, such as
contact.
Given a current Context, we expect a System to provide us with
- derivatives for the continuous differential equations that already satisfy
the differentiated form of the constraints (typically, acceleration
constraints),
- a projection method for least-squares correction of violated higher-level
constraints (position and velocity level),
- a time-of-next-update method that can be used to adjust the integrator
step size in preparation for a discrete update,
- methods that can update discrete variables when their update time is
reached,
- witness (guard) functions for event isolation,
- event handlers (reset functions) for making appropriate changes to state
and mode variables when an event has been isolated.
The continuous parts of the trajectory are advanced using a numerical
integrator. Different integrators have different properties; you can choose
the one that is most appropriate for your application or use the default
which is adequate for most systems.
<h3>How the simulation is stepped: simulation mechanics for authors of discrete
and hybrid systems</h3>
This section is targeted toward users who have created a LeafSystem implementing
a discrete or hybrid system. For authors of such systems, it can be useful to
understand the simulation details in order to attain the desired state behavior
over time. This behavior is dependent on the ordering in which discrete events
and continuous updates are processed. (By "discrete events" we mean to include
any of Drake's event handlers.) The basic issues and terminology are
introduced in the @ref discrete_systems module; please look there first before
proceeding.
As pictured in @ref discrete_systems, when a continuous-time system has
discrete events, the state x can have two significant values at the event
time t. These are
- x⁻(t), the value of x _before_ the discrete update occurs (○ markers), and
- x⁺(t), the value of x _after_ the discrete update occurs (● markers).
Thus the value of the Context, which contains both time and state, advances
from {t, x⁻(t)} to {t, x⁺(t)} as a result of the update. While those Context
values are user-visible, the details of stepping here require an intermediate
value which we'll denote {t, x*(t)}.
Recall that Drake's state x is partitioned into continuous, discrete, and
abstract partitions xc, xd, and xa, so `x = { xc, xd, xa }`. Within a single
step, these are updated in three stages:
-# Unrestricted update (can change x)
-# Discrete update (can change only xd)
-# Continuous update (changes t and xc)
Where needed, we extend the above notation to xc⁻, xa⁺, etc. to indicate the
value of an individual partition at a particular stage of the stepping
algorithm.
The following pseudocode uses the above notation to describe the algorithm
"Step()" that the %Simulator uses to incrementally advance the system
trajectory (time t and state x). The Simulator's AdvanceTo() method will be
defined in terms of Step below. In general, the length of a step is not known a
priori and is determined by the Step() algorithm. Each step consists of zero or
more unrestricted updates, followed by zero or more discrete updates, followed
by (possibly zero-length) continuous time and state advancement, followed by
zero or more publishes, and then a call to the monitor() function if one has
been defined.
Updates, publishes, and the monitor can report errors or detect a
termination condition; that is not shown in the pseudocode below. We follow
this policy:
- If any unrestricted update event fails, we leave the state unchanged and
report failure. We leave unspecified whether the handlers for other
simultaneous unrestricted update events are executed or skipped in this case.
(That could affect behavior if they have side effects but in any case the
state will not be modified.)
- Next, if any discrete update event fails, we report failure. In this case
the state may have been partially updated; don't assume it has been left
unchanged. We leave unspecified whether the handlers for other simultaneous
discrete events are executed.
- Next, if any publish event fails, we _continue_ executing the handlers for
all simultaneous publish events, and report failure after they have all been
executed. The state is returned as updated since publish events can have
external consequences based on that updated state.
- A "reached termination" status from any event handler permits continued
processing of simultaneous events, but doesn't permit time to advance
any further.
The pseudocode will clarify the effects on time and state of each of the update
stages above. This algorithm is given a starting Context value `{tₛ, x⁻(tₛ)}`
and returns an end Context value `{tₑ, x⁻(tₑ)}`, where tₑ is _no later_ than a
given tₘₐₓ.
```
// Advance the trajectory (time and state) from start value {tₛ, x⁻(tₛ)} to an
// end value {tₑ, x⁻(tₑ)}, where tₛ ≤ tₑ ≤ tₘₐₓ.
procedure Step(tₛ, x⁻(tₛ), tₘₐₓ)
// Update any variables (no restrictions).
x*(tₛ) ← DoAnyUnrestrictedUpdates(tₛ, x⁻(tₛ))
// ----------------------------------
// Time and state are at {tₛ, x*(tₛ)}
// ----------------------------------
// Update discrete variables.
xd⁺(tₛ) ← DoAnyDiscreteUpdates(tₛ, x*(tₛ))
xc⁺(tₛ) ← xc*(tₛ) // These values carry over from x*(tₛ).
xa⁺(tₛ) ← xa*(tₛ)
// ----------------------------------
// Time and state are at {tₛ, x⁺(tₛ)}
// ----------------------------------
// See how far it is safe to integrate without missing any events.
tₑᵥₑₙₜ ← CalcNextEventTime(tₛ, x⁺(tₛ))
// Integrate continuous variables forward in time. Integration may terminate
// before reaching tₛₜₒₚ due to witnessed events.
tₛₜₒₚ ← min(tₑᵥₑₙₜ, tₘₐₓ)
tₑ, xc⁻(tₑ) ← Integrate(tₛ, x⁺(tₛ), tₛₜₒₚ)
xd⁻(tₑ) ← xd⁺(tₛ) // Discrete values are held from x⁺(tₛ).
xa⁻(tₑ) ← xa⁺(tₛ)
// ----------------------------------
// Time and state are at {tₑ, x⁻(tₑ)}
// ----------------------------------
DoAnyPublishes(tₑ, x⁻(tₑ))
CallMonitor(tₑ, x⁻(tₑ))
return {tₑ, x⁻(tₑ)}
```
We can use the notation and pseudocode to flesh out the AdvanceTo(),
AdvancePendingEvents(), and Initialize() functions. Termination and error
conditions detected by event handlers or the monitor are reported as status
returns from these methods.
```
// Advance the simulation until time tₘₐₓ.
procedure AdvanceTo(tₘₐₓ) → status
t ← current_time
while t < tₘₐₓ
{tₑ, x⁻(tₑ)} ← Step(t, x⁻(t), tₘₐₓ)
{t, x⁻(t)} ← {tₑ, x⁻(tₑ)}
endwhile
// AdvancePendingEvents() is an advanced method, not commonly used.
// Perform just the start-of-step update to advance from x⁻(t) to x⁺(t).
procedure AdvancePendingEvents() → status
t ≜ current_time, x⁻(t) ≜ current_state
x⁺(t) ← DoAnyPendingUpdates(t, x⁻(t)) as in Step()
x(t) ← x⁺(t) // No continuous update needed.
DoAnyPublishes(t, x(t))
CallMonitor(t, x(t))
// Update time and state to {t₀, x⁻(t₀)}, which is the starting value of the
// trajectory, and thus the value the Context should contain at the start of the
// first simulation step.
procedure Initialize(t₀, x₀) → status
// Initialization events can be optionally suppressed.
x⁺(t₀) ← DoAnyInitializationUpdates as in Step()
x⁻(t₀) ← x⁺(t₀) // No continuous update needed.
// ----------------------------------
// Time and state are at {t₀, x⁻(t₀)}
// ----------------------------------
DoAnyPublishes(t₀, x⁻(t₀))
CallMonitor(t₀, x⁻(t₀))
```
Initialize() can be viewed as a "0ᵗʰ step" that occurs before the first Step()
call as described above. Like Step(), Initialize() first performs pending
updates (in this case only initialization events can be "pending", and even
those may be optionally suppressed). Time doesn't advance so there is no
continuous update phase and witnesses cannot trigger. Finally, again like
Step(), the initial trajectory point `{t₀, x⁻(t₀)}` is provided to the handlers
for any triggered publish events. That includes initialization publish events
(if not suppressed), per-step publish events, and periodic or timed publish
events that trigger at t₀, followed by a call to the monitor() function if one
has been defined (a monitor is semantically identical to a per-step publish).
Optionally, initialization events can be suppressed. This can be useful when
reusing the simulator over the same system and time span.
@tparam_nonsymbolic_scalar
*/
template <typename T>
class Simulator {
public:
DRAKE_NO_COPY_NO_MOVE_NO_ASSIGN(Simulator)
/// Create a %Simulator that can advance a given System through time to
/// produce a trajectory consisting of a sequence of Context values. The
/// System must not have unresolved input ports if the values of those ports
/// are necessary for computations performed during simulation (see class
/// documentation).
///
/// The Simulator holds an internal, non-owned reference to the System
/// object so you must ensure that `system` has a longer lifetime than the
/// %Simulator. It also owns a compatible Context internally that takes on
/// each of the trajectory values. You may optionally provide a Context that
/// will be used as the initial condition for the simulation; otherwise the
/// %Simulator will obtain a default Context from `system`.
explicit Simulator(const System<T>& system,
std::unique_ptr<Context<T>> context = nullptr);
/// Create a %Simulator which additionally maintains ownership of the System.
///
/// @exclude_from_pydrake_mkdoc{The prior overload's docstring is better, and
/// we only need one of the two -- overloading on ownership doesn't make
/// sense for pydrake.}
Simulator(std::unique_ptr<const System<T>> system,
std::unique_ptr<Context<T>> context = nullptr);
// TODO(sherm1) Make Initialize() attempt to satisfy constraints.
/// Prepares the %Simulator for a simulation. In order, the sequence of
/// actions taken here is:
/// - The active integrator's Initialize() method is invoked.
/// - Statistics are reset.
/// - By default, initialization update events are triggered and handled to
/// produce the initial trajectory value `{t₀, x(t₀)}`. If initialization
/// events are suppressed, it is the caller's responsibility to ensure the
/// desired initial state.
/// - Then that initial value is provided to the handlers for any publish
/// events that have triggered, including initialization events if any, and
/// per-step publish events, periodic or other time-triggered publish
/// events that are scheduled for the initial time t₀, and finally a call
/// to the monitor() function if one has been defined.
///
/// See the class documentation for more information. We recommend calling
/// Initialize() explicitly prior to beginning a simulation so that error
/// conditions will be discovered early. However, Initialize() will be called
/// automatically by the first AdvanceTo() call if it hasn't already been
/// called.
///
/// @note If you make a change to the Context or to Simulator options between
/// AdvanceTo() calls you should consider whether to call Initialize() before
/// resuming; AdvanceTo() will not do that automatically for you. Whether to
/// do so depends on whether you want the above initialization operations
/// performed.
///
/// @note In particular, if you changed the time you must call Initialize().
/// The time-triggered events must be recalculated in case one is due at the
/// new starting time. The AdvanceTo() call will throw an exception if the
/// Initialize() call is missing.
///
/// @note The only way to suppress initialization events is by calling
/// Initialize() explicitly with the `suppress_initialization_events`
/// parameter set. The most common scenario for this is when
/// reusing a Simulator object. In this case, the caller is responsible for
/// ensuring the correctness of the initial state.
///
/// @warning Initialize() does not automatically attempt to satisfy System
/// constraints -- it is up to you to make sure that constraints are
/// satisfied by the initial conditions.
///
/// @throws std::exception if the combination of options doesn't make sense
/// or if any handled event reports failure. Other error conditions are
/// possible from the System and integrator in use.
///
/// @param params (optional) a parameter structure (@see InitializeParams).
///
/// @retval status A SimulatorStatus object indicating success, termination,
/// or an error condition as reported by event handlers or
/// the monitor function.
/// @see AdvanceTo(), AdvancePendingEvents(), SimulatorStatus
SimulatorStatus Initialize(const InitializeParams& params = {});
/// Advances the System's trajectory until `boundary_time` is reached in
/// the Context or some other termination condition occurs.
///
/// We recommend that you call Initialize() prior to making the first call
/// to AdvanceTo(). However, if you don't it will be called for you the first
/// time that you attempt a step, possibly resulting in unexpected error
/// conditions. See documentation for `Initialize()` for the error conditions
/// it might produce.
///
  /// @warning You should consider calling Initialize() if you alter the
  /// Context or Simulator options between successive AdvanceTo() calls. See
/// Initialize() for more information.
///
/// @note You can track simulation progress to terminate on arbitrary
/// conditions using a _monitor_ function; see set_monitor().
///
/// @throws std::exception if any handled event reports failure. Other error
/// conditions are possible from the System and integrator in use.
///
/// @param boundary_time The maximum time to which the trajectory will be
/// advanced by this call to %AdvanceTo(). The method may return earlier
/// if an event or the monitor function requests termination or reports
/// an error condition.
/// @retval status A SimulatorStatus object indicating success, termination,
/// or an error condition as reported by event handlers or the monitor
/// function. The time in the context will be set either to the
/// boundary_time or the time a termination or error was first detected.
///
/// @pre The internal Context satisfies all System constraints or will after
/// pending Context updates are performed.
/// @see Initialize(), AdvancePendingEvents(), SimulatorStatus, set_monitor()
SimulatorStatus AdvanceTo(const T& boundary_time);
/// (Advanced) Handles discrete and abstract state update events that are
/// pending from the previous AdvanceTo() call, without advancing time.
/// See the %Simulator class description for details about how %Simulator
/// advances time and handles events. In the terminology used there, this
/// method advances the internal Context from `{t, x⁻(t)}` to `{t, x⁺(t)}`.
///
/// Normally, these update events would be handled at the start of the next
/// AdvanceTo() call, so this method is rarely needed. It can be useful
/// at the end of a simulation or to get intermediate results when you are
/// specifically interested in the `x⁺(t)` result.
///
/// This method is equivalent to `AdvanceTo(current_time)`, where
/// `current_time=simulator.get_context().get_time())`. If there are no
/// pending events, nothing happens except possibly a final per-step publish
/// call (if enabled) followed by a call to the monitor() function (if one
/// has been provided).
///
/// @throws std::exception if any handled event reports failure.
///
/// @retval status A SimulatorStatus object indicating success, termination,
/// or an error condition as reported by event handlers or
/// the monitor function.
/// @see AdvanceTo(), Initialize(), SimulatorStatus
SimulatorStatus AdvancePendingEvents() {
return AdvanceTo(get_context().get_time());
}
/// Provides a monitoring function that will be invoked at the end of
/// every step. (See the Simulator class documentation for a precise
/// definition of "step".) A monitor() function can be used to capture the
/// trajectory, to terminate the simulation, or to detect error conditions.
/// The monitor() function is invoked by the %Simulator with a Context whose
/// value is a point along the simulated trajectory. The monitor can be any
/// functor and should capture any System references it needs to operate
/// correctly.
///
/// A monitor() function behaves the same as would a per-step Publish event
/// handler included in the top-level System or Diagram being simulated. As in
/// the case of Publish(), the monitor is called at the end of every step
/// taken internally by AdvanceTo(), and also at the end of Initialize() and
/// AdvancePendingEvents(). (See the Simulator class documentation for more
/// detail about what happens when in these methods.) The monitor receives the
/// top-level (root) Context, from which any sub-Context can be obtained using
/// `subsystem.GetMyContextFromRoot()`, provided the necessary subsystem
/// reference has been captured for use in the monitor.
///
/// #### Examples
/// Output time and continuous states whenever the trajectory is advanced:
/// @code
/// simulator.set_monitor([](const Context<T>& root_context) {
/// std::cout << root_context.get_time() << " "
/// << root_context.get_continuous_state_vector()
/// << std::endl;
/// return EventStatus::Succeeded();
/// });
/// @endcode
///
/// Terminate early but successfully on a condition in a subsystem of the
/// System diagram being simulated:
/// @code
/// simulator.set_monitor([&my_subsystem](const Context<T>& root_context) {
/// const Context<T>& subcontext =
/// my_subsystem.GetMyContextFromRoot(root_context);
/// if (my_subsystem.GoalReached(subcontext)) {
/// return EventStatus::ReachedTermination(my_subsystem,
/// "Simulation achieved the desired goal.");
/// }
/// return EventStatus::Succeeded();
/// });
/// @endcode
/// In the above case, the Simulator's AdvanceTo() method will return early
/// when the subsystem reports that it has reached its goal. The returned
/// status will indicate the termination reason, and a human-readable
/// termination message containing the message provided by the monitor can be
/// obtained with status.FormatMessage().
///
/// Failure due to plant center of mass falling below a threshold:
/// @code
/// simulator.set_monitor([&plant](const Context<T>& root_context) {
/// const Context<T>& plant_context =
/// plant.GetMyContextFromRoot(root_context);
/// const Vector3<T> com =
/// plant.CalcCenterOfMassPositionInWorld(plant_context);
/// if (com[2] < 0.1) { // Check z height of com.
/// return EventStatus::Failed(plant, "System fell over.");
/// }
/// return EventStatus::Succeeded();
/// });
/// @endcode
/// In the above case the Simulator's AdvanceTo() method will throw an
/// std::exception containing a human-readable message including
/// the text provided in the monitor.
///
/// @note monitor() is called every time the trajectory is advanced by a step,
/// which can mean it is called many times during a single AdvanceTo() call.
///
/// @note The presence of a monitor has no effect on the step sizes taken,
/// so a termination or error condition will be discovered only when first
/// observed after a step is complete; it will not be further localized. Use
/// witness-triggered events instead if you need precise isolation.
  void set_monitor(std::function<EventStatus(const Context<T>&)> monitor) {
    // Sink parameter: taken by value above, then moved into the member.
    monitor_ = std::move(monitor);
  }
/// Removes the monitoring function if there is one.
/// @see set_monitor()
  // Assigning nullptr empties the std::function, so no monitor is installed.
  void clear_monitor() { monitor_ = nullptr; }
/// Obtains a reference to the monitoring function, which may be empty.
/// @see set_monitor()
  const std::function<EventStatus(const Context<T>&)>& get_monitor() const {
    // Returned by const reference; the std::function is empty when no monitor
    // has been set (or it was cleared).
    return monitor_;
  }
// TODO(sherm1): Provide options for issuing a warning or aborting the
// simulation if the desired rate cannot be achieved.
/// Slow the simulation down to *approximately* synchronize with real time
/// when it would otherwise run too fast. Normally the %Simulator takes steps
/// as quickly as it can. You can request that it slow down to synchronize
/// with real time by providing a realtime rate greater than zero here.
///
/// @warning No guarantees can be made about how accurately the simulation
/// can be made to track real time, even if computation is fast enough. That's
/// because the system utilities used to implement this do not themselves
/// provide such guarantees. So this is likely to work nicely for
/// visualization purposes where human perception is the only concern. For any
/// other uses you should consider whether approximate real time is adequate
/// for your purposes.
///
/// @note If the full-speed simulation is already slower than real time you
/// can't speed it up with this call! Instead consider requesting less
/// integration accuracy, using a faster integration method or fixed time
/// step, or using a simpler model.
///
/// @param realtime_rate
/// Desired rate relative to real time. Set to 1 to track real time, 2 to
/// run twice as fast as real time, 0.5 for half speed, etc. Zero or
/// negative restores the rate to its default of 0, meaning the simulation
/// will proceed as fast as possible.
  void set_target_realtime_rate(double realtime_rate) {
    // Negative requests are clamped to 0, which means "run as fast as
    // possible" (no realtime throttling).
    target_realtime_rate_ = std::max(realtime_rate, 0.);
  }
/// Return the real time rate target currently in effect. The default is
/// zero, meaning the %Simulator runs as fast as possible. You can change the
/// target with set_target_realtime_rate().
  double get_target_realtime_rate() const {
    return target_realtime_rate_;  // 0 (the default) means unthrottled.
  }
/// Return the rate that simulated time has progressed relative to real time.
/// A return of 1 means the simulation just matched real
/// time, 2 means the simulation was twice as fast as real time, 0.5 means
/// it was running in 2X slow motion, etc.
///
/// The value returned here is calculated as follows: <pre>
///
/// simulated_time_now - initial_simulated_time
/// rate = -------------------------------------------
/// realtime_now - initial_realtime
/// </pre>
/// The `initial` times are recorded when Initialize() or ResetStatistics()
/// is called. The returned rate is undefined if Initialize() has not yet
/// been called.
///
/// @returns The rate achieved since the last Initialize() or
/// ResetStatistics() call.
///
/// @see set_target_realtime_rate()
double get_actual_realtime_rate() const;
/// (To be deprecated) Prefer using per-step publish events instead.
///
/// Sets whether the simulation should trigger a forced-Publish event on the
/// System under simulation at the end of every trajectory-advancing step.
/// Specifically, that means the System::Publish() event dispatcher will be
/// invoked on each subsystem of the System and passed the current Context
/// and a forced-publish Event. If a subsystem has declared a forced-publish
/// event handler, that will be called. Otherwise, nothing will happen.
///
/// Enabling this option does not cause a forced-publish to be triggered at
/// initialization; if you want that you should also call
/// `set_publish_at_initialization(true)`. If you want a forced-publish at the
/// end of every step, you will usually also want one at the end of
/// initialization, requiring both options to be enabled.
///
/// @see LeafSystem::DeclarePerStepPublishEvent()
/// @see LeafSystem::DeclareForcedPublishEvent()
  void set_publish_every_time_step(bool publish) {
    // Just latches the flag; per the doc comment above it is consulted at the
    // end of every trajectory-advancing step.
    publish_every_time_step_ = publish;
  }
/// (To be deprecated) Prefer using initialization or per-step publish
/// events instead.
///
/// Sets whether the simulation should trigger a forced-Publish at the end
/// of Initialize(). See set_publish_every_time_step() documentation for
/// more information.
///
/// @see LeafSystem::DeclareInitializationPublishEvent()
/// @see LeafSystem::DeclarePerStepPublishEvent()
/// @see LeafSystem::DeclareForcedPublishEvent()
  void set_publish_at_initialization(bool publish) {
    // Just latches the flag; per the doc comment above it is consulted at the
    // end of Initialize().
    publish_at_initialization_ = publish;
  }
/// Returns true if the set_publish_every_time_step() option has been
/// enabled. By default, returns false.
bool get_publish_every_time_step() const { return publish_every_time_step_; }
/// Returns a const reference to the internally-maintained Context holding the
/// most recent step in the trajectory. This is suitable for publishing or
/// extracting information about this trajectory step. Do not call this method
/// if there is no Context.
  const Context<T>& get_context() const {
    // It is an error to ask for the Context after release_context() (or a
    // reset_context(nullptr)); callers unsure should check has_context().
    DRAKE_ASSERT(context_ != nullptr);
    return *context_;
  }
/// Returns a mutable reference to the internally-maintained Context holding
/// the most recent step in the trajectory. This is suitable for use in
/// updates, sampling operations, event handlers, and constraint projection.
/// You can also modify this prior to calling Initialize() to set initial
/// conditions. Do not call this method if there is no Context.
  Context<T>& get_mutable_context() {
    // It is an error to ask for the Context after release_context() (or a
    // reset_context(nullptr)); callers unsure should check has_context().
    DRAKE_ASSERT(context_ != nullptr);
    return *context_;
  }
  /// Returns `true` if this Simulator has an internally-maintained Context.
  /// This is true unless release_context() has been called, or reset_context()
  /// was most recently given a nullptr.
  bool has_context() const { return context_ != nullptr; }
/// Replace the internally-maintained Context with a different one. The
/// current Context is deleted. This is useful for supplying a new set of
/// initial conditions. You should invoke Initialize() after replacing the
/// Context.
/// @param context The new context, which may be null. If the context is
/// null, a new context must be set before attempting to step
/// the system forward.
  void reset_context(std::unique_ptr<Context<T>> context) {
    // Take ownership of the replacement Context (which may be nullptr); the
    // previously-held Context is destroyed here.
    context_ = std::move(context);
    // Keep the integrator's Context pointer in sync with ours.
    integrator_->reset_context(context_.get());
    // Require the caller to Initialize() again before advancing time.
    initialization_done_ = false;
  }
/// Transfer ownership of this %Simulator's internal Context to the caller.
/// The %Simulator will no longer contain a Context. The caller must not
/// attempt to advance the simulator in time after that point.
/// @sa reset_context()
  std::unique_ptr<Context<T>> release_context() {
    // Detach the integrator from the Context before relinquishing ownership so
    // it is not left holding a dangling pointer.
    integrator_->reset_context(nullptr);
    initialization_done_ = false;
    // Hands the Context to the caller, leaving context_ null (has_context()
    // will return false afterwards).
    return std::move(context_);
  }
/// Forget accumulated statistics. Statistics are reset to the values they
/// have post construction or immediately after `Initialize()`.
void ResetStatistics();
  // The four statistics accessors below report counts accumulated since the
  // most recent Initialize() or ResetStatistics() call.
  /// Gets the number of steps since the last Initialize() or ResetStatistics()
  /// call. (We're not counting the Initialize() 0-length "step".) Note that
  /// every AdvanceTo() call can potentially take many steps.
  int64_t get_num_steps_taken() const { return num_steps_taken_; }
  /// Gets the number of effective publish dispatcher calls made since the last
  /// Initialize() or ResetStatistics() call. A dispatch is ineffective (not
  /// counted) if _any_ of the publish events fails or _all_ the publish events
  /// return "did nothing". A single dispatcher call may handle multiple publish
  /// events.
  int64_t get_num_publishes() const { return num_publishes_; }
  /// Gets the number of effective discrete variable update dispatcher calls
  /// since the last Initialize() or ResetStatistics() call. A dispatch is
  /// ineffective (not counted) if _any_ of the discrete update events fails or
  /// _all_ the discrete update events return "did nothing". A single dispatcher
  /// call may handle multiple discrete update events.
  int64_t get_num_discrete_updates() const { return num_discrete_updates_; }
  /// Gets the number of effective unrestricted update dispatcher calls since
  /// the last Initialize() or ResetStatistics() call. A dispatch is ineffective
  /// (not counted) if _any_ of the unrestricted update events fails or _all_
  /// the unrestricted update events return "did nothing". A single dispatcher
  /// call may handle multiple unrestricted update events.
  int64_t get_num_unrestricted_updates() const {
    return num_unrestricted_updates_; }
/// Gets a reference to the integrator used to advance the continuous aspects
/// of the system.
const IntegratorBase<T>& get_integrator() const { return *integrator_.get(); }
/// Gets a reference to the mutable integrator used to advance the continuous
/// state of the system.
IntegratorBase<T>& get_mutable_integrator() { return *integrator_.get(); }
/// Resets the integrator with a new one using factory construction.
/// @code
/// simulator.reset_integrator<RungeKutta3Integrator<double>>().
/// @endcode
/// Resetting the integrator resets the %Simulator such that it needs to be
/// initialized again -- see Initialize() for details.
/// @note Integrator needs a constructor of the form
/// Integrator(const System&, Context*); this
/// constructor is usually associated with error-controlled integrators.
template <class Integrator>
Integrator& reset_integrator() {
static_assert(
std::is_constructible_v<Integrator, const System<T>&, Context<T>*>,
"Integrator needs a constructor of the form "
"Integrator::Integrator(const System&, Context*); this "
"constructor is usually associated with error-controlled integrators.");
integrator_ =
std::make_unique<Integrator>(get_system(), &get_mutable_context());
initialization_done_ = false;
return *static_cast<Integrator*>(integrator_.get());
}
/// Resets the integrator with a new one using factory construction and a
/// maximum step size argument (which is required for constructing fixed-step
/// integrators).
/// @code
/// simulator.reset_integrator<RungeKutta2Integrator<double>>(0.1).
/// @endcode
/// @see argument-less version of reset_integrator() for note about
/// initialization.
/// @note Integrator needs a constructor of the form
/// Integrator(const System&, const T&, Context*); this
/// constructor is usually associated with fixed-step integrators (i.e.,
/// integrators which do not support error estimation).
template <class Integrator>
Integrator& reset_integrator(const T max_step_size) {
static_assert(
std::is_constructible_v<Integrator, const System<T>&, double,
Context<T>*>,
"Integrator needs a constructor of the form "
"Integrator::Integrator(const System&, const T&, Context*); this "
"constructor is usually associated with fixed-step integrators.");
integrator_ = std::make_unique<Integrator>(get_system(), max_step_size,
&get_mutable_context());
initialization_done_ = false;
return *static_cast<Integrator*>(integrator_.get());
}
/// Gets the length of the interval used for witness function time isolation.
/// The length of the interval is computed differently, depending on context,
/// to support multiple applications, as described below:
///
/// * **Simulations using error controlled integrators**: the isolation time
/// interval will be scaled by the product of the system's characteristic
/// time and the accuracy stored in the Context.
/// * **Simulations using integrators taking fixed steps**: the isolation time
/// interval will be determined differently depending on whether the
/// accuracy is set in the Context or not. If the accuracy *is* set in the
/// Context, the nominally fixed steps for integrating continuous state will
/// be subdivided until events have been isolated to the requisite interval
/// length, which is scaled by the step size times the accuracy in the
/// Context. If accuracy is not set in the Context, event isolation will
/// not be performed.
///
/// The isolation window length will never be smaller than the integrator's
/// working minimum tolerance (see
  /// IntegratorBase::get_working_minimum_step_size()).
///
/// @returns the isolation window if the Simulator should be isolating
/// witness-triggered events in time, or returns empty otherwise
/// (indicating that any witness-triggered events should trigger
/// at the end of a time interval over which continuous state is
/// integrated).
/// @throws std::exception if the accuracy is not set in the Context and
/// the integrator is not operating in fixed step mode (see
  /// IntegratorBase::get_fixed_step_mode()).
std::optional<T> GetCurrentWitnessTimeIsolation() const;
  /// Gets a constant reference to the system.
  /// @note a mutable reference is not available.
  // (system_ always refers to the System supplied at construction, whether or
  // not this Simulator owns it via owned_system_.)
  const System<T>& get_system() const { return system_; }
private:
template <typename> friend class internal::SimulatorPythonInternal;
enum TimeOrWitnessTriggered {
kNothingTriggered = 0b00,
kTimeTriggered = 0b01,
kWitnessTriggered = 0b10,
kBothTriggered = 0b11
};
// All constructors delegate to here.
Simulator(
const System<T>* system,
std::unique_ptr<const System<T>> owned_system,
std::unique_ptr<Context<T>> context);
[[nodiscard]] EventStatus HandleUnrestrictedUpdate(
const EventCollection<UnrestrictedUpdateEvent<T>>& events);
[[nodiscard]] EventStatus HandleDiscreteUpdate(
const EventCollection<DiscreteUpdateEvent<T>>& events);
[[nodiscard]] EventStatus HandlePublish(
const EventCollection<PublishEvent<T>>& events);
// If an event handler failed, we have to interrupt the simulation.
// In that case, updates the SimulatorStatus to explain what happened and
// then optionally throws or returns true. Returns false and does nothing
// if no failure.
bool HasEventFailureOrMaybeThrow(
const EventStatus& event_status, bool throw_on_failure,
SimulatorStatus* simulator_status);
TimeOrWitnessTriggered IntegrateContinuousState(
const T& next_publish_time,
const T& next_update_time,
const T& boundary_time,
CompositeEventCollection<T>* witnessed_events);
// Private methods related to witness functions.
void IsolateWitnessTriggers(
const std::vector<const WitnessFunction<T>*>& witnesses,
const VectorX<T>& w0,
const T& t0, const VectorX<T>& x0, const T& tf,
std::vector<const WitnessFunction<T>*>* triggered_witnesses);
void PopulateEventDataForTriggeredWitness(
const T& t0, const T& tf, const WitnessFunction<T>* witness,
Event<T>* event, CompositeEventCollection<T>* events) const;
static bool DidWitnessTrigger(
const std::vector<const WitnessFunction<T>*>& witness_functions,
const VectorX<T>& w0,
const VectorX<T>& wf,
std::vector<const WitnessFunction<T>*>* triggered_witnesses);
VectorX<T> EvaluateWitnessFunctions(
const std::vector<const WitnessFunction<T>*>& witness_functions,
const Context<T>& context) const;
void RedetermineActiveWitnessFunctionsIfNecessary();
// The steady_clock is immune to system clock changes so increases
// monotonically. We'll work in fractional seconds.
using Clock = std::chrono::steady_clock;
using Duration = std::chrono::duration<double>;
using TimePoint = std::chrono::time_point<Clock, Duration>;
// If the simulated time in the context is ahead of real time, pause long
// enough to let real time catch up (approximately).
void PauseIfTooFast() const;
// A pointer to the integrator.
std::unique_ptr<IntegratorBase<T>> integrator_;
// TODO(sherm1) This a workaround for an apparent bug in clang 3.8 in which
// defining this as a static constexpr member kNaN failed to instantiate
// properly for the AutoDiffXd instantiation (worked in gcc and MSVC).
// Restore to sanity when some later clang is current.
  static constexpr double nan() {
    // quiet_NaN serves as a "not yet recorded" sentinel for the time
    // statistics members below (initial_simtime_, last_known_simtime_).
    return std::numeric_limits<double>::quiet_NaN();
  }
// Do not use this. This is valid iff the constructor is passed a
// unique_ptr (allowing the Simulator to maintain ownership). Use the
// system_ variable instead, which is valid always.
const std::unique_ptr<const System<T>> owned_system_;
const System<T>& system_; // Just a reference; not owned.
std::unique_ptr<Context<T>> context_; // The trajectory Context.
// Temporaries used for witness function isolation.
std::vector<const WitnessFunction<T>*> triggered_witnesses_;
VectorX<T> w0_, wf_;
  // Slow down to this rate if possible (user settable).
  double target_realtime_rate_{SimulatorConfig{}.target_realtime_rate};
  bool publish_every_time_step_{SimulatorConfig{}.publish_every_time_step};
  // NOTE(review): this default is drawn from SimulatorConfig's
  // publish_every_time_step field rather than a dedicated
  // publish_at_initialization field -- presumably intentional because the
  // defaults coincide; confirm against SimulatorConfig.
  bool publish_at_initialization_{SimulatorConfig{}.publish_every_time_step};
// These are recorded at initialization or statistics reset.
double initial_simtime_{nan()}; // Simulated time at start of period.
TimePoint initial_realtime_; // Real time at start of period.
// The number of discrete updates since the last statistics reset.
int64_t num_discrete_updates_{0};
// The number of unrestricted updates since the last statistics reset.
int64_t num_unrestricted_updates_{0};
// The number of publishes since the last statistics reset.
int64_t num_publishes_{0};
// The number of integration steps since the last statistics reset.
int64_t num_steps_taken_{0};
// Set by Initialize() and reset by various traumas.
bool initialization_done_{false};
// Set by Initialize() and AdvanceTo(). Used to detect unexpected jumps in
// time.
double last_known_simtime_{nan()};
// The vector of active witness functions.
std::unique_ptr<std::vector<const WitnessFunction<T>*>> witness_functions_;
// Indicator for whether the Simulator needs to redetermine the active witness
// functions.
bool redetermine_active_witnesses_{true};
// Per step events that are to be handled on every "major time step" (i.e.,
// every successful completion of a step). This collection is constructed
// within Initialize().
std::unique_ptr<CompositeEventCollection<T>> per_step_events_;
// Timed events can be triggered either at a particular time (like an alarm)
// or periodically. This collection is constructed within Initialize().
std::unique_ptr<CompositeEventCollection<T>> timed_events_;
// Witnessed events are triggered as a witness function crosses zero during
// AdvanceTo(). This collection is constructed within Initialize().
std::unique_ptr<CompositeEventCollection<T>> witnessed_events_;
// All events merged together. This collection is constructed within
// Initialize().
std::unique_ptr<CompositeEventCollection<T>> merged_events_;
// Indicates when a timed or witnessed event needs to be handled on the next
// call to AdvanceTo().
TimeOrWitnessTriggered time_or_witness_triggered_{
TimeOrWitnessTriggered::kNothingTriggered
};
// Pre-allocated temporaries for updated discrete states.
std::unique_ptr<DiscreteValues<T>> discrete_updates_;
// Pre-allocated temporaries for states from unrestricted updates.
std::unique_ptr<State<T>> unrestricted_updates_;
// Pre-allocated temporary for ContinuousState passed to event handlers after
// witness function triggering.
std::unique_ptr<ContinuousState<T>> event_handler_xc_;
// Mapping of witness functions to pre-allocated events.
std::unordered_map<const WitnessFunction<T>*, std::unique_ptr<Event<T>>>
witness_function_events_;
// Optional monitor() method to capture trajectory, terminate, or fail.
std::function<EventStatus(const Context<T>&)> monitor_;
// Optional pydrake-internal monitor callback.
void (*python_monitor_)() = nullptr;
};
#ifndef DRAKE_DOXYGEN_CXX
namespace internal {
// This function computes the previous (i.e., that which is one step closer to
// negative infinity) *non-denormalized* (i.e., either zero or normalized)
// floating-point number from `value`. nexttoward() provides very similar
// functionality except for its proclivity for producing denormalized numbers
// that typically only result from arithmetic underflow and are hence dangerous
// to use in further floating point operations. Thus,
// GetPreviousNormalizedValue() acts like nexttoward(value, -inf) but without
// producing denormalized numbers.
// @param value a floating point value that is not infinity or NaN. Denormalized
// inputs are treated as zero to attain consistent behavior regardless
// of the setting of the FPU "treat denormalized numbers as zero" mode
// (which can be activated through linking with shared libraries that
// use gcc's -ffast-math option).
template <class T>
T GetPreviousNormalizedValue(const T& value) {
  using std::nexttoward;
  using std::abs;
  // There are three distinct cases to be handled:
  //     -∞       -10⁻³⁰⁸    0    10⁻³⁰⁸        ∞
  //     |-----------|-------|------|----------|
  // (a) ^           ^              ^          ^  [-∞, 10⁻³⁰⁸] ∪ (10⁻³⁰⁸, ∞]
  // (b)              ^      ^                    (-10⁻³⁰⁸, 10⁻³⁰⁸)
  // (c)                            ^             10⁻³⁰⁸
  // Treat denormalized numbers as zero. This code is designed to produce the
  // same outputs for `value` regardless of the setting of the FPU's DAZ ("treat
  // denormalized numbers as zero") mode.
  const double min_normalized = std::numeric_limits<double>::min();
  // value_mod is `value` with denormal magnitudes flushed to exactly zero.
  // (The const reference lifetime-extends the temporary the ternary creates.)
  const T& value_mod = (abs(value) < min_normalized) ? 0.0 : value;
  // Treat zero (b) and DBL_MIN (c) specially, since nexttoward(value, -inf)
  // returns denormalized numbers for these two values.
  if (value_mod == 0.0)
    return -std::numeric_limits<double>::min();
  if (value_mod == min_normalized)
    return 0.0;
  // Case (a) uses nexttoward(.). Note that `value` == `value_mod` on this
  // path: all denormal inputs were already handled by the zero case above.
  const long double inf = std::numeric_limits<long double>::infinity();
  const double prev_value = nexttoward(value, -inf);
  // Sanity-check that the result is itself normalized (or exactly zero).
  DRAKE_DEMAND(
      std::fpclassify(ExtractDoubleOrThrow(prev_value)) == FP_NORMAL ||
      std::fpclassify(ExtractDoubleOrThrow(prev_value)) == FP_ZERO);
  return prev_value;
}
} // namespace internal
#endif
} // namespace systems
} // namespace drake
DRAKE_DECLARE_CLASS_TEMPLATE_INSTANTIATIONS_ON_DEFAULT_NONSYMBOLIC_SCALARS(
class drake::systems::Simulator)
| 0 |
/home/johnshepherd/drake/systems | /home/johnshepherd/drake/systems/analysis/antiderivative_function.h | #pragma once
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include "drake/common/default_scalars.h"
#include "drake/common/drake_copyable.h"
#include "drake/common/eigen_types.h"
#include "drake/common/unused.h"
#include "drake/systems/analysis/scalar_dense_output.h"
#include "drake/systems/analysis/scalar_initial_value_problem.h"
namespace drake {
namespace systems {
/// A thin wrapper of the ScalarInitialValueProblem class that, in concert with
/// Drake's ODE initial value problem solvers ("integrators"), provide the
/// ability to perform quadrature on an arbitrary scalar integrable function.
/// That is, it allows the evaluation of an antiderivative function F(u; 𝐤),
/// such that F(u; 𝐤) = ∫ᵥᵘ f(x; 𝐤) dx where f : ℝ → ℝ , u ∈ ℝ, v ∈ ℝ,
/// 𝐤 ∈ ℝᵐ. The parameter vector 𝐤 allows for generic function definitions,
/// which can later be evaluated for any instance of said vector. Also, note
/// that 𝐤 can be understood as an m-tuple or as an element of ℝᵐ, the vector
/// space, depending on how it is used by the integrable function.
///
/// See ScalarInitialValueProblem class documentation for information
/// on caching support and dense output usage for improved efficiency in
/// antiderivative function F evaluation.
///
/// For further insight into its use, consider the following examples.
///
/// - Solving the elliptic integral of the first kind
/// E(φ; ξ) = ∫ᵠ √(1 - ξ² sin² θ)⁻¹ dθ becomes straightforward by defining
/// f(x; 𝐤) ≜ √(1 - k₀² sin² x)⁻¹ with 𝐤 ≜ [ξ] and evaluating F(u; 𝐤) at
/// u = φ.
///
/// - As the bearings in a rotating machine age over time, these are more likely
/// to fail. Let γ be a random variable describing the time to first bearing
/// failure, described by a family of probability density functions gᵧ(y; l)
/// parameterized by bearing load l. In this context, the probability of a
/// bearing under load to fail during the first N months becomes
/// P(0 < γ ≤ N mo.; l) = Gᵧ(N mo.; l) - Gᵧ(0; l), where Gᵧ(y; l) is the
/// family of cumulative density functions, parameterized by bearing load l,
/// and G'ᵧ(y; l) = gᵧ(y; l). Therefore, defining f(x; 𝐤) ≜ gᵧ(x; k₀) with
/// 𝐤 ≜ [l] and evaluating F(u; 𝐤) at u = N yields the result.
///
/// @tparam_nonsymbolic_scalar
template <typename T>
class AntiderivativeFunction {
 public:
  DRAKE_NO_COPY_NO_MOVE_NO_ASSIGN(AntiderivativeFunction);
  /// Scalar integrable function f(x; 𝐤) type.
  ///
  /// @param x The variable of integration x ∈ ℝ .
  /// @param k The parameter vector 𝐤 ∈ ℝᵐ.
  /// @return The function value f(@p x; @p k).
  using IntegrableFunction = std::function<T(const T& x, const VectorX<T>& k)>;
  /// Constructs the antiderivative function of the given
  /// @p integrable_function, parameterized with @p k.
  ///
  /// @param integrable_function The function f(x; 𝐤) to be integrated.
  /// @param k The vector of parameters 𝐤 ∈ ℝᵐ. The default is the empty
  ///          vector (indicating no parameters).
  AntiderivativeFunction(const IntegrableFunction& integrable_function,
                         const Eigen::Ref<const VectorX<T>>& k = Vector0<T>{});
  /// Evaluates the definite integral F(u; 𝐤) = ∫ᵥᵘ f(x; 𝐤) dx from the lower
  /// integration bound @p v to @p u using the parameter vector 𝐤 specified in
  /// the constructor (see definition in class documentation).
  ///
  /// @param v The lower integration bound.
  /// @param u The upper integration bound.
  /// @returns The value of the definite integral.
  /// @throws std::exception if v > u.
  T Evaluate(const T& v, const T& u) const;
  /// Evaluates and yields an approximation of the definite integral
  /// F(u; 𝐤) = ∫ᵥᵘ f(x; 𝐤) dx for v ≤ u ≤ w, i.e. the closed interval
  /// that goes from the lower integration bound @p v to the uppermost
  /// integration bound @p w, using the parameter vector 𝐤 specified in the
  /// constructor (see definition in class documentation).
  ///
  /// To this end, the wrapped IntegratorBase instance solves the integral
  /// from @p v to @p w (i.e. advances the state x of its differential form
  /// x'(t) = f(x; 𝐤) from @p v to @p w), creating a scalar dense output over
  /// that [@p v, @p w] interval along the way.
  ///
  /// @param v The lower integration bound.
  /// @param w The uppermost integration bound. Usually, @p v < @p w as an empty
  ///          dense output would result if @p v = @p w.
  /// @returns A dense approximation to F(u; 𝐤) (that is, a function), defined
  ///          for @p v ≤ u ≤ @p w.
  /// @note The larger the given @p w value is, the larger the approximated
  ///       interval will be. See documentation of the specific dense output
  ///       technique used by the internally held IntegratorBase subclass
  ///       instance for more details.
  /// @throws std::exception if v > w.
  std::unique_ptr<ScalarDenseOutput<T>> MakeDenseEvalFunction(const T& v,
                                                              const T& w) const;
  /// Resets the internal integrator instance.
  ///
  /// A usage example is shown below.
  /// @code{.cpp}
  ///    antiderivative_f.reset_integrator<RungeKutta2Integrator<T>>(max_step);
  /// @endcode
  ///
  /// @param args The integrator type-specific arguments.
  /// @returns The new integrator instance.
  /// @tparam Integrator The integrator type, which must be an
  ///                    IntegratorBase subclass.
  /// @tparam Args The integrator specific argument types.
  /// @warning This operation invalidates pointers returned by
  ///          AntiderivativeFunction::get_integrator() and
  ///          AntiderivativeFunction::get_mutable_integrator().
  template <typename Integrator, typename... Args>
  Integrator* reset_integrator(Args&&... args) {
    // Forwards to the wrapped scalar IVP, which owns the integrator.
    return scalar_ivp_->template reset_integrator<Integrator>(
        std::forward<Args>(args)...);
  }
  /// Gets a reference to the internal integrator instance.
  const IntegratorBase<T>& get_integrator() const {
    return scalar_ivp_->get_integrator();
  }
  /// Gets a mutable reference to the internal integrator instance.
  IntegratorBase<T>& get_mutable_integrator() {
    return scalar_ivp_->get_mutable_integrator();
  }
 private:
  // Scalar IVP used to perform quadrature.
  std::unique_ptr<ScalarInitialValueProblem<T>> scalar_ivp_;
};
} // namespace systems
} // namespace drake
DRAKE_DECLARE_CLASS_TEMPLATE_INSTANTIATIONS_ON_DEFAULT_NONSYMBOLIC_SCALARS(
class ::drake::systems::AntiderivativeFunction)
| 0 |
/home/johnshepherd/drake/systems | /home/johnshepherd/drake/systems/analysis/monte_carlo.cc | #include "drake/systems/analysis/monte_carlo.h"
#include <future>
#include <list>
#include <mutex>
#include <thread>
#include "drake/systems/analysis/simulator.h"
#include "drake/systems/framework/system.h"
namespace drake {
namespace systems {
namespace analysis {
double RandomSimulation(
const SimulatorFactory& make_simulator, const ScalarSystemFunction& output,
const double final_time, RandomGenerator* const generator) {
auto simulator = make_simulator(generator);
const System<double>& system = simulator->get_system();
system.SetRandomContext(&simulator->get_mutable_context(), generator);
simulator->AdvanceTo(final_time);
return output(system, simulator->get_context());
}
namespace {
// Serial (single-threaded) implementation of MonteCarloSimulation.
std::vector<RandomSimulationResult> MonteCarloSimulationSerial(
const SimulatorFactory& make_simulator, const ScalarSystemFunction& output,
const double final_time, const int num_samples,
RandomGenerator* const generator) {
std::vector<RandomSimulationResult> simulation_results;
simulation_results.reserve(num_samples);
for (int sample = 0; sample < num_samples; ++sample) {
RandomSimulationResult simulation_result(*generator);
simulation_result.output =
RandomSimulation(make_simulator, output, final_time, generator);
simulation_results.push_back(std::move(simulation_result));
}
return simulation_results;
}
// Checks if a future has completed execution.
template <typename T>
bool IsFutureReady(const std::future<T>& future) {
  // wait_for() is the only way to poll a std::future's status without
  // blocking until completion; the tiny 1ms timeout keeps the check cheap.
  return future.wait_for(std::chrono::milliseconds(1)) ==
         std::future_status::ready;
}
// Parallel (multi-threaded) implementation of MonteCarloSimulation.
//
// Concurrency model: this (dispatching) thread is the only one that touches
// `active_operations` and `generator`; each worker thread touches only its own
// distinct element of `simulation_results`, so no locking is needed. Every
// future is drained before this function returns, so the lambda's by-reference
// captures cannot dangle.
std::vector<RandomSimulationResult> MonteCarloSimulationParallel(
    const SimulatorFactory& make_simulator, const ScalarSystemFunction& output,
    const double final_time, const int num_samples,
    RandomGenerator* const generator, const int num_threads) {
  // Initialize storage for all simulation results. The full vector must be
  // constructed up front (i.e. we can't use reserve()) to avoid a race
  // condition on checking the size of the vector when the worker threads write
  // simulation results.
  std::vector<RandomSimulationResult> simulation_results(
      num_samples, RandomSimulationResult(RandomGenerator()));
  // Storage for active parallel simulation operations.
  std::list<std::future<int>> active_operations;
  // Keep track of how many simulations have been dispatched already.
  int simulations_dispatched = 0;
  // Loop until every sample has been dispatched AND every dispatched sample
  // has completed.
  while (active_operations.size() > 0 ||
         simulations_dispatched < num_samples) {
    // Check for completed operations.
    for (auto operation = active_operations.begin();
         operation != active_operations.end();) {
      if (IsFutureReady(*operation)) {
        // This call to future.get() is necessary to propagate any exception
        // thrown during simulation execution.
        const int sample_num = operation->get();
        drake::log()->debug("Simulation {} completed", sample_num);
        // Erase returns iterator to the next node in the list.
        operation = active_operations.erase(operation);
      } else {
        // Advance to next node in the list.
        ++operation;
      }
    }
    // Dispatch new operations.
    while (static_cast<int>(active_operations.size()) < num_threads
           && simulations_dispatched < num_samples) {
      // Create the simulation result using the current generator state.
      simulation_results.at(simulations_dispatched) =
          RandomSimulationResult(*generator);
      // Make the simulator. Both the factory and SetRandomContext() consume
      // `generator` here, on the dispatching thread, so generator access
      // remains single-threaded.
      auto simulator = make_simulator(generator);
      const auto& system = simulator->get_system();
      system.SetRandomContext(&simulator->get_mutable_context(), generator);
      // The worker takes ownership of the simulator and writes only element
      // `sample_num` of `simulation_results`.
      auto perform_simulation =
          [simulator = std::move(simulator), &simulation_results, &output,
           final_time, sample_num = simulations_dispatched] () {
        simulator->AdvanceTo(final_time);
        simulation_results.at(sample_num).output =
            output(simulator->get_system(), simulator->get_context());
        return sample_num;
      };
      active_operations.emplace_back(
          std::async(std::launch::async, std::move(perform_simulation)));
      drake::log()->debug("Simulation {} dispatched", simulations_dispatched);
      ++simulations_dispatched;
    }
    // Wait a bit before checking for completion.
    // TODO(calderpg-tri) When std::when_any([std::future,...]) or equivalent is
    // available, this can be replaced.
    std::this_thread::sleep_for(std::chrono::milliseconds(100));
  }
  return simulation_results;
}
} // namespace
std::vector<RandomSimulationResult> MonteCarloSimulation(
    const SimulatorFactory& make_simulator, const ScalarSystemFunction& output,
    const double final_time, const int num_samples, RandomGenerator* generator,
    const Parallelism parallelism) {
  // Fall back to a locally-owned generator when the caller supplies none.
  std::unique_ptr<RandomGenerator> owned_generator;
  if (generator == nullptr) {
    owned_generator = std::make_unique<RandomGenerator>();
    generator = owned_generator.get();
  }
  // The parallel implementation incurs overhead even with a single thread, so
  // single-threaded requests take the serial path.
  const int num_threads = parallelism.num_threads();
  if (num_threads <= 1) {
    return MonteCarloSimulationSerial(make_simulator, output, final_time,
                                      num_samples, generator);
  }
  return MonteCarloSimulationParallel(make_simulator, output, final_time,
                                      num_samples, generator, num_threads);
}
} // namespace analysis
} // namespace systems
} // namespace drake
| 0 |
/home/johnshepherd/drake/systems | /home/johnshepherd/drake/systems/analysis/implicit_euler_integrator.h | #pragma once
#include <memory>
#include "drake/common/default_scalars.h"
#include "drake/common/drake_copyable.h"
#include "drake/systems/analysis/implicit_integrator.h"
#include "drake/systems/analysis/runge_kutta2_integrator.h"
namespace drake {
namespace systems {
/**
* A first-order, fully implicit integrator with second order error estimation.
*
* This integrator uses the following update rule:<pre>
* x(t+h) = x(t) + h f(t+h,x(t+h))
* </pre>
* where x are the state variables, h is the integration step size, and
* f() returns the time derivatives of the state variables. Contrast this
* update rule to that of an explicit first-order integrator:<pre>
* x(t+h) = x(t) + h f(t, x(t))
* </pre>
* Thus implicit first-order integration must solve a nonlinear system of
* equations to determine *both* the state at t+h and the time derivatives
* of that state at that time. Cast as a nonlinear system of equations,
* we seek the solution to:<pre>
* x(t+h) − x(t) − h f(t+h,x(t+h)) = 0
* </pre>
* given unknowns x(t+h).
*
* This "implicit Euler" method is known to be L-Stable, meaning both that
* applying it at a fixed integration step to the "test" equation `y(t) = eᵏᵗ`
* yields zero (for `k < 0` and `t → ∞`) *and* that it is also A-Stable.
* A-Stability, in turn, means that the method can integrate the linear constant
* coefficient system `dx/dt = Ax` at any step size without the solution
* becoming unstable (growing without bound). The practical effect of
* L-Stability is that the integrator tends to be stable for any given step size
* on an arbitrary system of ordinary differential equations. See
* [Lambert, 1991], Ch. 6 for an approachable discussion on stiff differential
* equations and L- and A-Stability.
*
* This implementation uses Newton-Raphson (NR) and relies upon the obvious
* convergence to a solution for `g = 0` where
* `g(x(t+h)) ≡ x(t+h) − x(t) − h f(t+h,x(t+h))` as `h` becomes sufficiently
* small. General implementational details for the Newton method were gleaned
* from Section IV.8 in [Hairer, 1996].
*
* ### Error Estimation
*
* In this integrator, we simultaneously take a large step at the requested
* step size of h as well as two half-sized steps each with step size `h/2`.
* The result from two half-sized steps is propagated as the solution, while
* the difference between the two results is used as the error estimate for the
* propagated solution. This error estimate is accurate to the second order.
*
* To be precise, let `x̅ⁿ⁺¹` be the computed solution from a large step,
* `x̃ⁿ⁺¹` be the computed solution from two small steps, and `xⁿ⁺¹` be the true
* solution. Since the integrator propagates `x̃ⁿ⁺¹` as its solution, we denote
* the true error vector as `ε = x̃ⁿ⁺¹ − xⁿ⁺¹`. %ImplicitEulerIntegrator uses
* `ε* = x̅ⁿ⁺¹ − x̃ⁿ⁺¹`, the difference between the two solutions, as the
* second-order error estimate, because for a smooth system, `‖ε*‖ = O(h²)`,
* and `‖ε − ε*‖ = O(h³)`. See the notes in get_error_estimate_order() for a
* detailed derivation of the error estimate's truncation error.
*
* In this implementation, %ImplicitEulerIntegrator attempts the large
* full-sized step before attempting the two small half-sized steps, because
* the large step is more likely to fail to converge, and if it is performed
* first, convergence failures are detected early, avoiding the unnecessary
* effort of computing potentially-successful small steps.
*
* Optionally, %ImplicitEulerIntegrator can instead use the implicit trapezoid
* method for error estimation. However, in our testing the step doubling method
* substantially outperforms the implicit trapezoid method.
*
* - [Hairer, 1996] E. Hairer and G. Wanner. Solving Ordinary Differential
* Equations II (Stiff and Differential-Algebraic Problems).
* Springer, 1996, Section IV.8, p. 118–130.
* - [Lambert, 1991] J. D. Lambert. Numerical Methods for Ordinary Differential
* Equations. John Wiley & Sons, 1991.
*
* @note In the statistics reported by IntegratorBase, all statistics that deal
* with the number of steps or the step sizes will track the large full-sized
* steps. This is because the large full-sized `h` is the smallest irrevocable
* time-increment advanced by this integrator: if, for example, the second small
* half-sized step fails, this integrator revokes to the state before the first
* small step. This behavior is similar to other integrators with multi-stage
* evaluation: the step-counting statistics treat a "step" as the combination of
* all the stages.
* @note Furthermore, because the small half-sized steps are propagated as the
* solution, the large full-sized step is the error estimator, and the error
* estimation statistics track the effort during the large full-sized step. If
* the integrator is not in full-Newton mode (see
* ImplicitIntegrator::set_use_full_newton()), most of the work incurred by
* constructing and factorizing matrices and by failing Newton-Raphson
* iterations will be counted toward the error estimation statistics, because
* the large step is performed first.
*
* @note This integrator uses the integrator accuracy setting, even when run
* in fixed-step mode, to limit the error in the underlying Newton-Raphson
* process. See IntegratorBase::set_target_accuracy() for more info.
* @see ImplicitIntegrator class documentation for information about implicit
* integration methods in general.
*
* @tparam_nonsymbolic_scalar
* @ingroup integrators
*/
// TODO(antequ): Investigate revamping the error estimation and normal
// statistics so that the effort spent recomputing Jacobians and iteration
// matrices near stiff steps is not overwhelmingly allocated into the error
// estimator's statistics for Jacobian computations.
template <class T>
class ImplicitEulerIntegrator final : public ImplicitIntegrator<T> {
 public:
  DRAKE_NO_COPY_NO_MOVE_NO_ASSIGN(ImplicitEulerIntegrator)

  ~ImplicitEulerIntegrator() override = default;

  explicit ImplicitEulerIntegrator(const System<T>& system,
                                   Context<T>* context = nullptr)
      : ImplicitIntegrator<T>(system, context) {}

  /**
   * Returns true, because this integrator supports error estimation.
   */
  bool supports_error_estimation() const final { return true; }

  /**
   * Returns the asymptotic order of the difference between the large and small
   * steps (from which the error estimate is computed), which is 2. That is, the
   * error estimate, `ε* = x̅ⁿ⁺¹ − x̃ⁿ⁺¹` has the property that `‖ε*‖ = O(h²)`,
   * and it deviates from the true error, `ε`, by `‖ε − ε*‖ = O(h³)`.
   *
   * ### Derivation of the asymptotic order
   *
   * This derivation is based on the same derivation for
   * VelocityImplicitEulerIntegrator, and so the equation numbers are from
   * there.
   *
   * To derive the second-order error estimate, let us first define the
   * vector-valued function `e(tⁿ, h, xⁿ) = x̅ⁿ⁺¹ − xⁿ⁺¹`, the local truncation
   * error for a single, full-sized implicit Euler integration step, with
   * initial conditions `(tⁿ, xⁿ)`, and a step size of `h`. Furthermore, use
   * `ẍ` to denote `df/dt`, and `∇f` and `∇ẍ` to denote the Jacobians `df/dx`
   * and `dẍ/dx` of the ODE system `ẋ = f(t, x)`. Note that `ẍ` uses a total
   * time derivative, i.e., `ẍ = ∂f/∂t + ∇f f`.
   *
   * Let us use `x*` to denote the true solution after a half-step, `x(tⁿ+½h)`,
   * and `x̃*` to denote the implicit Euler solution after a single
   * half-sized step. Furthermore, let us use `xⁿ*¹` to denote the true solution
   * of the system at time `t = tⁿ+h` if the system were at `x̃*` when
   * `t = tⁿ+½h`. See the following diagram for an illustration.
   *
   *      Legend:
   *      ───── propagation along the true system
   *      :···· propagation using implicit Euler with a half step
   *      :---- propagation using implicit Euler with a full step
   *
   *      Time  tⁿ         tⁿ+½h         tⁿ+h
   *
   *      State :----------------------- x̅ⁿ⁺¹  <─── used for error estimation
   *            :
   *            :
   *            :
   *            :            :·········· x̃ⁿ⁺¹  <─── propagated result
   *            :            :
   *            :·········  x̃*  ─────── xⁿ*¹
   *            :
   *            xⁿ ───────  x*  ─────── xⁿ⁺¹  <─── true solution
   *
   * We will use superscripts to denote evaluating an expression with `x` at
   * that superscript and `t` at the corresponding time, e.g. `ẍⁿ` denotes
   * `ẍ(tⁿ, xⁿ)`, and `f*` denotes `f(tⁿ+½h, x*)`. We first present a shortened
   * derivation, followed by the longer, detailed version.
   *
   * We know the local truncation error for the implicit Euler method is:
   *
   *     e(tⁿ, h, xⁿ) = x̅ⁿ⁺¹ − xⁿ⁺¹ = ½ h²ẍⁿ + O(h³).                  (10)
   *
   * The local truncation error ε from taking two half steps is composed of
   * these two terms:
   *
   *     e₁ = xⁿ*¹ − xⁿ⁺¹ = (1/8) h²ẍⁿ + O₁(h³),                       (15)
   *     e₂ = x̃ⁿ⁺¹ − xⁿ*¹ = (1/8) h²ẍ* + O₂(h³) = (1/8) h²ẍⁿ + O₃(h³). (20)
   *
   * In the long derivation, we will show that these second derivatives
   * differ by at most O(h³).
   *
   * Taking the sum,
   *
   *     ε = x̃ⁿ⁺¹ − xⁿ⁺¹ = e₁ + e₂ = (1/4) h²ẍⁿ + O(h³).               (21)
   *
   * These two estimations allow us to obtain an estimation of the local error
   * from the difference between the available quantities x̅ⁿ⁺¹ and x̃ⁿ⁺¹:
   *
   *     ε* = x̅ⁿ⁺¹ − x̃ⁿ⁺¹ = e(tⁿ, h, xⁿ) − ε,
   *        = (1/4) h²ẍⁿ + O(h³),                                      (22)
   *
   * and therefore our error estimate is second order.
   *
   * Below we will show this derivation in detail along with the proof that
   * `‖ε − ε*‖ = O(h³)`:
   *
   * Let us look at a single implicit Euler step. Upon Newton-Raphson
   * convergence, the truncation error for implicit Euler is
   *
   *     e(tⁿ, h, xⁿ) = ½ h²ẍⁿ⁺¹ + O(h³)
   *                  = ½ h²ẍⁿ + O(h³).                                (10)
   *
   * To see why the two are equivalent, we can Taylor expand about `(tⁿ, xⁿ)`,
   *
   *     ẍⁿ⁺¹ = ẍⁿ + h dẍ/dtⁿ + O(h²) = ẍⁿ + O(h).
   *     e(tⁿ, h, xⁿ) = ½ h²ẍⁿ⁺¹ + O(h³) = ½ h²(ẍⁿ + O(h)) + O(h³)
   *                  = ½ h²ẍⁿ + O(h³).
   *
   * Moving on with our derivation, after one small half-sized implicit Euler
   * step, the solution `x̃*` is
   *
   *     x̃* = x* + e(tⁿ, ½h, xⁿ)
   *        = x* + (1/8) h²ẍⁿ + O(h³),
   *     x̃* − x* = (1/8) h²ẍⁿ + O(h³).                                 (11)
   *
   * Taylor expanding about `t = tⁿ+½h` in this `x = x̃*` alternate reality,
   *
   *     xⁿ*¹ = x̃* + ½h f(tⁿ+½h, x̃*) + O(h²).                          (12)
   *
   * Similarly, Taylor expansions about `t = tⁿ+½h` and the true solution
   * `x = x*` also give us
   *
   *     xⁿ⁺¹ = x* + ½h f* + O(h²),                                    (13)
   *     f(tⁿ+½h, x̃*) = f* + (∇f*) (x̃* − x*) + O(‖x̃* − x*‖²)
   *                  = f* + O(h²),                                    (14)
   * where in the last line we substituted Eq. (11).
   *
   * Eq. (12) minus Eq. (13) gives us,
   *
   *     xⁿ*¹ − xⁿ⁺¹ = x̃* − x* + ½h(f(tⁿ+½h, x̃*) − f*) + O(h³),
   *                 = x̃* − x* + O(h³),
   * where we just substituted in Eq. (14). Finally, substituting in Eq. (11),
   *
   *     e₁ = xⁿ*¹ − xⁿ⁺¹ = (1/8) h²ẍⁿ + O(h³).                        (15)
   *
   * After the second small step, the solution `x̃ⁿ⁺¹` is
   *
   *     x̃ⁿ⁺¹ = xⁿ*¹ + e(tⁿ+½h, ½h, x̃*),
   *          = xⁿ*¹ + (1/8)h² ẍ(tⁿ+½h, x̃*) + O(h³).                   (16)
   *
   * Taking Taylor expansions about `(tⁿ, xⁿ)`,
   *
   *     x* = xⁿ + ½h fⁿ + O(h²) = xⁿ + O(h).                          (17)
   *     x̃* − xⁿ = (x̃* − x*) + (x* − xⁿ) = O(h),                       (18)
   * where we substituted in Eqs. (11) and (17), and
   *
   *     ẍ(tⁿ+½h, x̃*) = ẍⁿ + ½h ∂ẍ/∂tⁿ + ∇ẍⁿ (x̃* − xⁿ) + O(h ‖x̃* − xⁿ‖)
   *                  = ẍⁿ + O(h),                                     (19)
   * where we substituted in Eq. (18).
   *
   * Substituting Eqs. (19) and (15) into Eq. (16),
   *
   *     x̃ⁿ⁺¹ = xⁿ*¹ + (1/8) h²ẍⁿ + O(h³)                              (20)
   *          = xⁿ⁺¹ + (1/4) h²ẍⁿ + O(h³),
   * therefore
   *
   *     ε = x̃ⁿ⁺¹ − xⁿ⁺¹ = (1/4) h² ẍⁿ + O(h³).                        (21)
   *
   * Subtracting Eq. (21) from Eq. (10),
   *
   *     e(tⁿ, h, xⁿ) − ε = (½ − 1/4) h²ẍⁿ + O(h³);
   *     ⇒ ε* = x̅ⁿ⁺¹ − x̃ⁿ⁺¹ = (1/4) h²ẍⁿ + O(h³).                      (22)
   *
   * Eq. (22) shows that our error estimate is second-order. Since the first
   * term on the RHS matches `ε` (Eq. (21)),
   *
   *     ε* = ε + O(h³).                                               (23)
   */
  int get_error_estimate_order() const final { return 2; }

  /**
   * Set this to true to use implicit trapezoid for error estimation;
   * otherwise this integrator will use step doubling for error estimation.
   * By default this integrator will use step doubling.
   */
  void set_use_implicit_trapezoid_error_estimation(bool flag) {
    use_implicit_trapezoid_error_estimation_ = flag;
  }

  /**
   * Returns true if the integrator will use implicit trapezoid for error
   * estimation; otherwise it indicates the integrator will use step doubling
   * for error estimation.
   */
  // Const-qualified: this is a pure accessor and must be callable on a
  // const ImplicitEulerIntegrator (previously it was mistakenly non-const).
  bool get_use_implicit_trapezoid_error_estimation() const {
    return use_implicit_trapezoid_error_estimation_;
  }

 private:
  // These are statistics that the base class, ImplicitIntegrator, requires
  // this child class to keep track of.
  struct Statistics {
    // See ImplicitIntegrator::get_num_jacobian_evaluations() or
    // ImplicitIntegrator::get_num_error_estimator_jacobian_evaluations()
    // for the definition of this statistic.
    int64_t num_jacobian_reforms{0};

    // See ImplicitIntegrator::get_num_iteration_matrix_factorizations() or
    // ImplicitIntegrator::
    // get_num_error_estimator_iteration_matrix_factorizations() for the
    // definition of this statistic.
    int64_t num_iter_factorizations{0};

    // See IntegratorBase::get_num_derivative_evaluations() or
    // ImplicitIntegrator::get_num_error_estimator_derivative_evaluations()
    // for the definition of this statistic. Note that, as the definitions
    // state, this count also includes all the function evaluations counted in
    // the statistic, num_jacobian_function_evaluations.
    int64_t num_function_evaluations{0};

    // See ImplicitIntegrator::get_num_derivative_evaluations_for_jacobian()
    // or ImplicitIntegrator::
    // get_num_error_estimator_derivative_evaluations_for_jacobian()
    // for the definition of this statistic.
    int64_t num_jacobian_function_evaluations{0};

    // See ImplicitIntegrator::get_num_newton_raphson_iterations()
    // or ImplicitIntegrator::
    // get_num_error_estimator_newton_raphson_iterations() for the definition
    // of this statistic.
    int64_t num_nr_iterations{0};
  };

  int64_t do_get_num_newton_raphson_iterations() const final {
    return num_nr_iterations_;
  }

  int64_t do_get_num_error_estimator_derivative_evaluations() const final {
    // When implicit trapezoid is chosen, implicit trapezoid is the error
    // estimator, and statistics for it are directly reported; otherwise, the
    // small half-sized steps are propagated and the large step is the error
    // estimator, so we report error estimator stats by subtracting those of
    // the small half-sized steps from the total statistics.
    return use_implicit_trapezoid_error_estimation_
               ? itr_statistics_.num_function_evaluations
               : (this->get_num_derivative_evaluations() -
                  hie_statistics_.num_function_evaluations);
  }

  int64_t do_get_num_error_estimator_derivative_evaluations_for_jacobian()
      const final {
    // See the above comments in
    // do_get_num_error_estimator_derivative_evaluations().
    return use_implicit_trapezoid_error_estimation_
               ? itr_statistics_.num_jacobian_function_evaluations
               : (this->get_num_derivative_evaluations_for_jacobian() -
                  hie_statistics_.num_jacobian_function_evaluations);
  }

  int64_t do_get_num_error_estimator_newton_raphson_iterations()
      const final {
    // See the above comments in
    // do_get_num_error_estimator_derivative_evaluations().
    return use_implicit_trapezoid_error_estimation_
               ? itr_statistics_.num_nr_iterations
               : (this->get_num_newton_raphson_iterations() -
                  hie_statistics_.num_nr_iterations);
  }

  int64_t do_get_num_error_estimator_jacobian_evaluations() const final {
    // See the above comments in
    // do_get_num_error_estimator_derivative_evaluations().
    return use_implicit_trapezoid_error_estimation_
               ? itr_statistics_.num_jacobian_reforms
               : (this->get_num_jacobian_evaluations() -
                  hie_statistics_.num_jacobian_reforms);
  }

  int64_t do_get_num_error_estimator_iteration_matrix_factorizations()
      const final {
    // See the above comments in
    // do_get_num_error_estimator_derivative_evaluations().
    return use_implicit_trapezoid_error_estimation_
               ? itr_statistics_.num_iter_factorizations
               : (this->get_num_iteration_matrix_factorizations() -
                  hie_statistics_.num_iter_factorizations);
  }

  void DoResetCachedJacobianRelatedMatrices() final;

  void DoResetImplicitIntegratorStatistics() final;

  static void ComputeAndFactorImplicitEulerIterationMatrix(
      const MatrixX<T>& J, const T& h,
      typename ImplicitIntegrator<T>::IterationMatrix* iteration_matrix);

  static void ComputeAndFactorImplicitTrapezoidIterationMatrix(
      const MatrixX<T>& J, const T& h,
      typename ImplicitIntegrator<T>::IterationMatrix* iteration_matrix);

  void DoInitialize() final;

  // Steps the system forward by h using implicit Euler and, for error
  // estimation, either two half-sized implicit Euler steps or one implicit
  // trapezoid step, if possible.
  // @param t0 the time at the left end of the integration interval.
  // @param h the integration step size to attempt.
  // @param xt0 the continuous state at t0.
  // @param [out] xtplus_ie contains the full-sized step implicit Euler
  //              integrator solution (i.e., `x(t0+h)`) on return.
  // @param [out] xtplus_hie contains the half-sized step solution (i.e.,
  //              `x(t0+h)`) on return, or the implicit Trapezoid solution.
  // @returns `true` if the step of size `h` was successful.
  bool AttemptStepPaired(const T& t0, const T& h, const VectorX<T>& xt0,
                         VectorX<T>* xtplus_ie, VectorX<T>* xtplus_hie);

  // Performs the bulk of the stepping computation for both implicit Euler and
  // implicit trapezoid method; all those methods need to do is provide a
  // residual function (`g`) and an iteration matrix computation and
  // factorization function (`compute_and_factor_iteration_matrix`) specific to
  // the particular integrator scheme and this method does the rest.
  // @param t0 the time at the left end of the integration interval.
  // @param h the integration step size (> 0) to attempt.
  // @param xt0 the continuous state at t0.
  // @param g the particular implicit function to compute the root of.
  // @param compute_and_factor_iteration_matrix the function for computing and
  //        factorizing the iteration matrix.
  // @param xtplus_guess the starting guess for x(t0+h) -- the full-sized step
  //        of implicit Euler passes x(t0) since it has no better guess;
  //        implicit Trapezoid and the half-sized steps of implicit Euler use
  //        the result from the full-sized step of implicit Euler.
  // @param[out] iteration_matrix the iteration matrix to be used for the
  //             particular integration scheme (implicit Euler, implicit
  //             trapezoid), which will be computed and factored, if necessary.
  // @param[out] xtplus the value for x(t0+h) on return.
  // @param trial the attempt for this approach (1-4). StepAbstract() uses more
  //        computationally expensive methods as the trial numbers increase.
  // @returns `true` if the method was successfully able to take an integration
  //           step of size h.
  // @post The time and continuous state in the context are indeterminate upon
  //       exit.
  // TODO(edrumwri) Explicitly test this method's fallback logic (i.e., how it
  //                calls MaybeFreshenMatrices()) in a unit test).
  bool StepAbstract(const T& t0, const T& h, const VectorX<T>& xt0,
                    const std::function<VectorX<T>()>& g,
                    const std::function<
                        void(const MatrixX<T>&, const T&,
                             typename ImplicitIntegrator<T>::IterationMatrix*)>&
                        compute_and_factor_iteration_matrix,
                    const VectorX<T>& xtplus_guess,
                    typename ImplicitIntegrator<T>::IterationMatrix*
                        iteration_matrix, VectorX<T>* xtplus, int trial = 1);

  // Takes a given step of the requested size, if possible.
  // @returns `true` if successful; on `true`, the time and continuous state
  //          will be advanced in the context (e.g., from t0 to t0 + h). On a
  //          `false` return, the time and continuous state in the context will
  //          be restored to its original value (at t0).
  bool DoImplicitIntegratorStep(const T& h) final;

  // Steps the system forward by a single step of at most h using the implicit
  // Euler method.
  // @param t0 the time at the left end of the integration interval.
  // @param h the maximum time increment to step forward.
  // @param xt0 the continuous state at t0.
  // @param[out] xtplus the computed value for `x(t0+h)` on successful return.
  // @returns `true` if the step of size `h` was successful.
  // @post The time and continuous state in the context are indeterminate upon
  //       exit.
  bool StepImplicitEuler(const T& t0, const T& h, const VectorX<T>& xt0,
                         VectorX<T>* xtplus);

  // Steps the system forward by a single step of at most h using the implicit
  // Euler method, starting with a guess for the state xtplus.
  // @param t0 the time at the left end of the integration interval.
  // @param h the maximum time increment to step forward.
  // @param xt0 the continuous state at t0.
  // @param xtplus_guess the starting guess for `x(t0+h)`.
  // @param[out] xtplus the computed value for `x(t0+h)` on successful return.
  // @returns `true` if the step of size `h` was successful.
  // @post The time and continuous state in the context are indeterminate upon
  //       exit.
  bool StepImplicitEulerWithGuess(const T& t0, const T& h,
      const VectorX<T>& xt0, const VectorX<T>& xtplus_guess,
      VectorX<T>* xtplus);

  // Steps forward by two steps of `h/2` using the implicit Euler
  // method, if possible.
  // @param t0 the time at the left end of the integration interval.
  // @param h the maximum time increment to step forward.
  // @param xt0 the continuous state at t0.
  // @param xtplus_ie x(t0+h) computed by the implicit Euler method.
  // @param[out] xtplus x(t0+h) computed by the two half-sized implicit Euler
  //             steps on successful return.
  // @returns `true` if the step was successful.
  // @post The time and continuous state in the context are indeterminate upon
  //       exit.
  bool StepHalfSizedImplicitEulers(const T& t0, const T& h,
      const VectorX<T>& xt0, const VectorX<T>& xtplus_ie, VectorX<T>* xtplus);

  // Steps forward by a single step of `h` using the implicit trapezoid
  // method, if possible.
  // @param t0 the time at the left end of the integration interval.
  // @param h the maximum time increment to step forward.
  // @param xt0 the continuous state at t0.
  // @param dx0 the time derivatives computed at time and state (t0, xt0).
  // @param xtplus_ie x(t0+h) computed by the implicit Euler method.
  // @param[out] xtplus x(t0+h) computed by the implicit trapezoid method on
  //             successful return.
  // @returns `true` if the step was successful.
  // @post The time and continuous state in the context are indeterminate upon
  //       exit.
  bool StepImplicitTrapezoid(const T& t0, const T& h, const VectorX<T>& xt0,
      const VectorX<T>& dx0, const VectorX<T>& xtplus_ie, VectorX<T>* xtplus);

  // The last computed iteration matrix and factorization for implicit Euler.
  typename ImplicitIntegrator<T>::IterationMatrix ie_iteration_matrix_;

  // The last computed iteration matrix and factorization for implicit
  // trapezoid.
  typename ImplicitIntegrator<T>::IterationMatrix itr_iteration_matrix_;

  // Vector used in error estimate calculations.
  VectorX<T> err_est_vec_;

  // The continuous state update vector used during Newton-Raphson.
  std::unique_ptr<ContinuousState<T>> dx_state_;

  // Variables to avoid heap allocations.
  VectorX<T> xt0_, xdot_, xtplus_ie_, xtplus_hie_;

  // Second order Runge-Kutta method for estimating the integration error when
  // the requested step size lies below the working step size.
  std::unique_ptr<RungeKutta2Integrator<T>> rk2_;

  // Various statistics.
  // This statistic tracks the number of Newton-Raphson iterations total,
  // combining the base implicit Euler and either the implicit Trapezoid
  // or the half-sized implicit Eulers. This is used in ImplicitIntegrator::
  // get_num_newton_raphson_iterations(). Other statistics integers for the
  // total are defined in ImplicitIntegrator.
  int64_t num_nr_iterations_{0};

  // These track statistics specific to implicit trapezoid or the two half-
  // sized steps. Only one of the following two will be used at a time, the
  // other one will remain at 0 as long as
  // use_implicit_trapezoid_error_estimation_ does not change.
  Statistics itr_statistics_;
  Statistics hie_statistics_;

  // Since this integrator computes two small steps for its solution and
  // simultaneously computes a large step to estimate the error, this is a
  // flag to indicate that the failed Jacobian is not computed from the
  // beginning of the time step, but rather from the second small step. Usually,
  // the Jacobian after a failed step was computed from (t0,x0), so
  // ImplicitIntegrator marks it as "fresh" so that the next attempt
  // will not attempt to compute a Jacobian. This flag tells the next step that
  // the Jacobian is still not "fresh", or computed from (t0,x0) at the
  // beginning of the step, even after the step has failed.
  bool failed_jacobian_is_from_second_small_step_{false};

  // If set to true, the integrator uses implicit trapezoid instead of two
  // half-sized steps for error estimation.
  bool use_implicit_trapezoid_error_estimation_{false};
};
} // namespace systems
} // namespace drake
DRAKE_DECLARE_CLASS_TEMPLATE_INSTANTIATIONS_ON_DEFAULT_NONSYMBOLIC_SCALARS(
class drake::systems::ImplicitEulerIntegrator)
| 0 |
/home/johnshepherd/drake/systems | /home/johnshepherd/drake/systems/analysis/velocity_implicit_euler_integrator.cc | #include "drake/systems/analysis/velocity_implicit_euler_integrator.h"
#include <cmath>
#include <limits>
#include <stdexcept>
#include <utility>
#include "drake/common/autodiff.h"
#include "drake/common/drake_assert.h"
#include "drake/common/fmt_eigen.h"
#include "drake/common/text_logging.h"
#include "drake/math/autodiff.h"
#include "drake/math/autodiff_gradient.h"
#include "drake/math/compute_numerical_gradient.h"
#include "drake/systems/analysis/implicit_integrator.h"
#include "drake/systems/framework/basic_vector.h"
namespace drake {
namespace systems {
template <class T>
void VelocityImplicitEulerIntegrator<T>::DoResetImplicitIntegratorStatistics() {
  // Zero every statistic owned by this class: first the counters specific to
  // the two half-sized steps, then the overall Newton-Raphson iteration
  // count. (The assignments are independent; the order is immaterial.)
  num_half_vie_nr_iterations_ = 0;
  num_half_vie_jacobian_function_evaluations_ = 0;
  num_half_vie_function_evaluations_ = 0;
  num_half_vie_iter_factorizations_ = 0;
  num_half_vie_jacobian_reforms_ = 0;
  num_nr_iterations_ = 0;
}
template <class T>
void VelocityImplicitEulerIntegrator<T>::DoInitialize() {
  using std::isnan;

  // Allocate the scratch state used to hold Newton-Raphson state updates.
  dx_state_ = this->get_system().AllocateTimeDerivatives();

  // Accuracy bounds for this integrator: the default applies when the user
  // requested none, and the loosest is the weakest accuracy it can honor.
  const double kDefaultAccuracy = 1e-1;
  const double kLoosestAccuracy = 5e-1;

  // If no initial step size target was provided, fall back to the maximum
  // step size -- which must then have been set.
  if (isnan(this->get_initial_step_size_target())) {
    if (isnan(this->get_maximum_step_size())) {
      throw std::logic_error(
          "Neither initial step size target nor maximum "
          "step size has been set for VelocityImplicitEulerIntegrator.");
    }
    this->request_initial_step_size_target(this->get_maximum_step_size());
  }

  // Select the working accuracy: the user's target when one was given,
  // clamped so it is never looser than this integrator can provide.
  double accuracy = this->get_target_accuracy();
  if (isnan(accuracy)) {
    accuracy = kDefaultAccuracy;
  } else if (accuracy > kLoosestAccuracy) {
    accuracy = kLoosestAccuracy;
  }
  this->set_accuracy_in_use(accuracy);

  // Empty the Jacobian so the first step is forced to recompute it.
  this->Jy_vie_.resize(0, 0);
}
template <class T>
void VelocityImplicitEulerIntegrator<T>::
    ComputeAndFactorImplicitEulerIterationMatrix(
        const MatrixX<T>& J, const T& h,
        typename ImplicitIntegrator<T>::IterationMatrix* iteration_matrix) {
  const int num_rows = J.rows();
  // Build the iteration matrix (I - h J) as `-h * J + I`; spelling it this
  // way avoids the O(n^2) dense subtraction that
  // `MatrixX<T>::Identity(n, n) - h * J` would incur.
  // TODO(edrumwri) Investigate using a move-type operation below.
  iteration_matrix->SetAndFactorIterationMatrix(
      -h * J + MatrixX<T>::Identity(num_rows, num_rows));
}
template <class T>
void VelocityImplicitEulerIntegrator<T>::CalcVelocityJacobian(const T& t,
    const T& h, const VectorX<T>& y, const VectorX<T>& qk,
    const VectorX<T>& qn, MatrixX<T>* Jy) {
  // Note: Unlike ImplicitIntegrator<T>::CalcJacobian(), we neither save the
  // context or change it back, because our implementation of
  // StepVelocityImplicitEuler() does not require the context to be restored.
  this->increment_jacobian_evaluations();

  // Snapshot the existing number of ODE evaluations so the evaluations
  // performed below can be attributed to Jacobian computation.
  const int64_t existing_ODE_evals = this->get_num_derivative_evaluations();

  // Compute the Jacobian using the selected computation scheme.
  if (this->get_jacobian_computation_scheme() ==
      ImplicitIntegrator<T>::JacobianComputationScheme::kForwardDifference ||
      this->get_jacobian_computation_scheme() ==
      ImplicitIntegrator<T>::JacobianComputationScheme::kCentralDifference) {
    // Compute the Jacobian using numerical differencing.
    DRAKE_ASSERT(qdot_ != nullptr);
    // Define the lambda l_of_y to evaluate ℓ(y).
    std::function<void(const VectorX<T>&, VectorX<T>*)> l_of_y =
        [&qk, &t, &qn, &h, this](const VectorX<T>& y_state,
                                 VectorX<T>* l_result) {
          *l_result =
              this->ComputeLOfY(t, y_state, qk, qn, h, this->qdot_.get());
        };

    const math::NumericalGradientOption numerical_gradient_method(
        (this->get_jacobian_computation_scheme() ==
         ImplicitIntegrator<T>::JacobianComputationScheme::kCentralDifference) ?
            math::NumericalGradientMethod::kCentral :
            math::NumericalGradientMethod::kForward);

    // Compute Jy by passing ℓ(y) to math::ComputeNumericalGradient().
    // TODO(antequ): Right now we modify the context twice each time we call
    // ℓ(y): once when we calculate qⁿ + h N(qₖ) v
    // (SetTimeAndContinuousState()), and once when we calculate ℓ(y)
    // (get_mutable_generalized_position()). However, this is only necessary
    // for each y that modifies a velocity (v). For all but one of the
    // miscellaneous states (z), we can reuse the position so that the context
    // needs only one modification. Investigate how to refactor this logic to
    // achieve this performance benefit while maintaining code readability.
    *Jy = math::ComputeNumericalGradient(l_of_y, y, numerical_gradient_method);
  } else if (
      this->get_jacobian_computation_scheme() ==
      ImplicitIntegrator<T>::JacobianComputationScheme::kAutomatic) {
    // Compute the Jacobian using automatic differentiation.
    this->ComputeAutoDiffVelocityJacobian(t, h, y, qk, qn, Jy);
  } else {
    // Throw by value. (This previously read `throw new std::logic_error(...)`,
    // which throws a *pointer*: it leaks the exception object and is not
    // caught by the idiomatic `catch (const std::logic_error&)` handlers.)
    throw std::logic_error("Invalid Jacobian computation scheme.");
  }

  // Use the new number of ODE evaluations to determine the number of ODE
  // evaluations used in computing Jacobians.
  this->increment_jacobian_computation_derivative_evaluations(
      this->get_num_derivative_evaluations() - existing_ODE_evals);
}
template <class T>
void VelocityImplicitEulerIntegrator<T>::ComputeAutoDiffVelocityJacobian(
    const T& t, const T& h, const VectorX<T>& y, const VectorX<T>& qk,
    const VectorX<T>& qn, MatrixX<T>* Jy) {
  // Computes the Jacobian ∂ℓ/∂y by evaluating ℓ(y) on an AutoDiff-converted
  // copy of the system and extracting the gradients from the result.
  DRAKE_LOGGER_DEBUG(
      "VelocityImplicitEulerIntegrator ComputeAutoDiffVelocityJacobian "
      "{}-Jacobian t={}", y.size(), t);
  DRAKE_LOGGER_DEBUG("  computing from qk {}, y {}", fmt_eigen(qk.transpose()),
                     fmt_eigen(y.transpose()));
  // TODO(antequ): Investigate how to refactor this method to use
  // math::jacobian(), if possible.
  // Get the system and the context in AutoDiffable format. Inputs must also
  // be copied to the context used by the AutoDiff'd system (which is
  // accomplished using FixInputPortsFrom()).
  // The converted system and its context are created lazily on first use and
  // cached for subsequent Jacobian computations.
  const System<T>& system = this->get_system();
  if (system_ad_ == nullptr) {
    system_ad_ = system.ToAutoDiffXd();
    context_ad_ = system_ad_->AllocateContext();
  }
  const Context<T>& context = this->get_context();
  context_ad_->SetTimeStateAndParametersFrom(context);
  system_ad_->FixInputPortsFrom(system, context, context_ad_.get());
  // (Re)allocate the scratch qdot vector if it is missing or mis-sized.
  if (qdot_ad_ == nullptr || qdot_ad_->size() != qn.size()) {
    qdot_ad_ = std::make_unique<BasicVector<AutoDiffXd>>(qn.size());
  }

  // Initialize an AutoDiff version of the variable y, seeding the partial
  // derivatives so that gradients are taken with respect to y.
  VectorX<AutoDiffXd> y_ad = math::InitializeAutoDiff(y);

  // Evaluate the AutoDiff system with y_ad.
  const VectorX<AutoDiffXd> result = this->ComputeLOfY(
      t, y_ad, qk, qn, h, this->qdot_ad_.get(),
      *(this->system_ad_), this->context_ad_.get());

  *Jy = math::ExtractGradient(result);

  // Sometimes ℓ(y) does not depend on y — for example, when ℓ(y) is a
  // constant or when ℓ(y) depends only on t. In this case, make sure that the
  // Jacobian isn't an n ✕ 0 matrix (this will cause a segfault when forming
  // Newton iteration matrices); if it is, we set it equal to an n x n zero
  // matrix.
  const int ny = y.size();
  if (Jy->cols() == 0) {
    *Jy = MatrixX<T>::Zero(ny, ny);
  }
  DRAKE_ASSERT(Jy->rows() == ny);
  DRAKE_ASSERT(Jy->cols() == ny);
}
template <class T>
bool VelocityImplicitEulerIntegrator<T>::MaybeFreshenVelocityMatrices(
    const T& t, const VectorX<T>& y, const VectorX<T>& qk,
    const VectorX<T>& qn, const T& h, int trial,
    const std::function<void(const MatrixX<T>&, const T&,
                             typename ImplicitIntegrator<T>::IterationMatrix*)>&
        compute_and_factor_iteration_matrix,
    typename ImplicitIntegrator<T>::IterationMatrix* iteration_matrix,
    MatrixX<T>* Jy) {
  // Computes the velocity Jacobian Jy and/or (re-)factors the Newton iteration
  // matrix as needed, using an escalating sequence of strategies keyed by
  // `trial` (1 = cheapest: reuse everything; 4 = give up). Returns true if the
  // caller may proceed with a Newton-Raphson iteration, false on failure.
  DRAKE_DEMAND(Jy != nullptr);
  DRAKE_DEMAND(iteration_matrix != nullptr);

  // Compute the initial Jacobian and iteration matrices and factor them, if
  // necessary. (An empty Jacobian signals "never computed".)
  if (!this->get_reuse() || Jy->rows() == 0 || this->IsBadJacobian(*Jy)) {
    CalcVelocityJacobian(t, h, y, qk, qn, Jy);
    this->increment_num_iter_factorizations();
    compute_and_factor_iteration_matrix(*Jy, h, iteration_matrix);
    return true;  // Indicate success.
  }

  // Reuse is activated, Jacobian is fully sized, and Jacobian is not "bad".
  // If the iteration matrix has not been set and factored, do only that.
  if (!iteration_matrix->matrix_factored()) {
    this->increment_num_iter_factorizations();
    compute_and_factor_iteration_matrix(*Jy, h, iteration_matrix);
    return true;  // Indicate success.
  }

  switch (trial) {
    case 1:
      // For the first trial, we do nothing: this will cause the Newton-Raphson
      // process to use the last computed (and already factored) iteration
      // matrix. This matrix may be from a previous time-step or a previously-
      // attempted step size.
      return true;  // Indicate success.

    case 2: {
      // For the second trial, we know the first trial, which uses the last
      // computed iteration matrix, has already failed. We perform the (likely)
      // next least expensive operation, which is re-constructing and factoring
      // the iteration matrix, using the last computed Jacobian. The last
      // computed Jacobian may be from a previous time-step or a previously-
      // attempted step size.
      this->increment_num_iter_factorizations();
      compute_and_factor_iteration_matrix(*Jy, h, iteration_matrix);
      return true;
    }

    case 3: {
      // For the third trial, we know that the first two trials, which
      // exhausted all our options short of recomputing the Jacobian, have
      // failed. We recompute the Jacobian matrix and refactor the iteration
      // matrix.
      // Note: Based on a few simple experimental tests, we found that the
      // optimization to abort this trial when matrices are already fresh in
      // ImplicitIntegrator<T>::MaybeFreshenMatrices() does not significantly
      // help here, especially because our Jacobian depends on step size h.
      CalcVelocityJacobian(t, h, y, qk, qn, Jy);
      this->increment_num_iter_factorizations();
      compute_and_factor_iteration_matrix(*Jy, h, iteration_matrix);
      return true;
    }

    case 4: {
      // Trial #4 indicates failure.
      return false;
    }

    default:
      throw std::domain_error("Unexpected trial number.");
  }
}
template <class T>
void VelocityImplicitEulerIntegrator<T>::FreshenVelocityMatricesIfFullNewton(
    const T& t, const VectorX<T>& y, const VectorX<T>& qk,
    const VectorX<T>& qn, const T& h,
    const std::function<void(const MatrixX<T>&, const T&,
                             typename ImplicitIntegrator<T>::IterationMatrix*)>&
        compute_and_factor_iteration_matrix,
    typename ImplicitIntegrator<T>::IterationMatrix* iteration_matrix,
    MatrixX<T>* Jy) {
  // In full-Newton mode, every Newton-Raphson iteration gets a freshly
  // computed velocity Jacobian and a freshly factored iteration matrix;
  // when full-Newton mode is off, this function is a no-op.
  DRAKE_DEMAND(Jy != nullptr);
  DRAKE_DEMAND(iteration_matrix != nullptr);
  if (this->get_use_full_newton()) {
    // Recompute the velocity Jacobian and refactor the iteration matrix.
    CalcVelocityJacobian(t, h, y, qk, qn, Jy);
    this->increment_num_iter_factorizations();
    compute_and_factor_iteration_matrix(*Jy, h, iteration_matrix);
  }
}
template <class T>
VectorX<T> VelocityImplicitEulerIntegrator<T>::ComputeResidualR(
    const T& t, const VectorX<T>& y, const VectorX<T>& qk, const VectorX<T>& qn,
    const VectorX<T>& yn, const T& h, BasicVector<T>* qdot) {
  // Residual of the velocity-implicit Euler equations:
  //   R(y) = y − yⁿ − h ℓ(y).
  // Note: ComputeLOfY() also sets the time and y states of the context.
  const VectorX<T> ell = ComputeLOfY(t, y, qk, qn, h, qdot);
  const VectorX<T> residual = y - yn - h * ell;
  return residual;
}
template <class T>
template <typename U>
VectorX<U> VelocityImplicitEulerIntegrator<T>::ComputeLOfY(
    const T& t, const VectorX<U>& y, const VectorX<T>& qk,
    const VectorX<T>& qn, const T& h, BasicVector<U>* qdot,
    const System<U>& system, Context<U>* context) {
  // Evaluates ℓ(y): the derivatives of the velocity/miscellaneous states,
  // computed at time t, at position q = qⁿ + h N(qₖ) v, and at the (v, z)
  // values held in y. Templated on scalar U so the same routine serves both
  // T and AutoDiffXd (used for Jacobian computation). `qdot` is scratch
  // storage; `context` is mutated (left with the evaluated time/state).
  DRAKE_DEMAND(qdot != nullptr);
  DRAKE_DEMAND(context != nullptr);
  int nq = qn.size();
  int ny = y.size();

  // Set the context to (t, qₖ, y).
  // TODO(antequ): Optimize this procedure to both (1) remove unnecessary heap
  // allocations, like in the VectorX<T> constructions of x and q and the return
  // statement, and (2) reduce unnecessary cache invalidations since
  // MapVelocityToQDot() doesn't set any caches.
  VectorX<U> x(nq+ny);
  x.head(nq) = qk;
  x.tail(ny) = y;
  context->SetTimeAndContinuousState(t, x);

  // Compute q = qⁿ + h N(qₖ) v.
  system.MapVelocityToQDot(*context,
      context->get_continuous_state().get_generalized_velocity(), &*qdot);
  const VectorX<U> q = qn + h * qdot->get_value();

  // Evaluate ℓ = f_y(t, q, v, z).
  // TODO(antequ): Right now this invalidates the entire cache that depends on
  // any of the continuous state. Investigate invalidating less of the cache
  // once we have a Context method for modifying just the generalized position.
  context->get_mutable_continuous_state()
      .get_mutable_generalized_position()
      .SetFromVector(q);
  const ContinuousState<U>& xc_deriv =
      this->EvalTimeDerivatives(system, *context);
  // The trailing ny entries of ẋ are [v̇; ż] = ℓ(y).
  return xc_deriv.CopyToVector().tail(ny);
}
template <class T>
bool VelocityImplicitEulerIntegrator<T>::StepVelocityImplicitEuler(
    const T& t0, const T& h, const VectorX<T>& xn,
    const VectorX<T>& xtplus_guess, VectorX<T>* xtplus,
    typename ImplicitIntegrator<T>::IterationMatrix* iteration_matrix,
    MatrixX<T>* Jy, int trial) {
  // Attempts one velocity-implicit Euler step of size h from (t0, xn), writing
  // the result into *xtplus. Newton-Raphson solves R(y) = y − yⁿ − h ℓ(y) = 0
  // for the velocity/misc states y; positions are then advanced with
  // q = qⁿ + h N(qₖ) v. On convergence failure, this method recurses with an
  // escalated `trial` number (which freshens the Jacobian/iteration matrix
  // more aggressively), up to trial 4. Returns true on convergence.
  using std::abs;

  // Verify the trial number is valid.
  DRAKE_ASSERT(trial >= 1 && trial <= 4);
  DRAKE_LOGGER_DEBUG(
      "VelocityImplicitEulerIntegrator::StepVelocityImplicitEuler(h={}, t={})",
      h, t0);
  const System<T>& system = this->get_system();

  // Verify xtplus. We also verify the size to make sure we're not making
  // unnecessary heap allocations.
  DRAKE_ASSERT(xtplus != nullptr && xtplus->size() == xn.size() &&
      xtplus_guess.size() == xn.size());

  // Initialize xtplus to the guess.
  *xtplus = xtplus_guess;

  Context<T>* context = this->get_mutable_context();
  const systems::ContinuousState<T>& cstate = context->get_continuous_state();
  int nq = cstate.num_q();
  int nv = cstate.num_v();
  int nz = cstate.num_z();
  const Eigen::VectorBlock<const VectorX<T>> qn = xn.head(nq);
  const Eigen::VectorBlock<const VectorX<T>> yn = xn.tail(nv + nz);

  // Define references to q, y, and v portions of xtplus for readability.
  Eigen::VectorBlock<VectorX<T>> qtplus = xtplus->head(nq);
  Eigen::VectorBlock<VectorX<T>> ytplus = xtplus->tail(nv + nz);
  const Eigen::VectorBlock<VectorX<T>> vtplus = xtplus->segment(nq, nv);

  // Set last_qtplus to qk. This will be used in computing dx to determine
  // convergence.
  VectorX<T> last_qtplus = qtplus;

  // Verify the size of qdot_ (allocated in DoImplicitIntegratorStep()).
  DRAKE_ASSERT(qdot_ != nullptr && qdot_->size() == nq);

  // We compute our residuals at tf = t0 + h.
  const T tf = t0 + h;

  // Initialize the "last" state update norm; this will be used to detect
  // convergence.
  VectorX<T> dx(xn.size());
  T last_dx_norm = std::numeric_limits<double>::infinity();

  // Calculate Jacobian and iteration matrices (and factorizations), as needed,
  // around (t0, xn). We do not do this calculation if full Newton is in use;
  // the calculation will be performed at the beginning of the loop instead.
  if (!this->get_use_full_newton() &&
      !this->MaybeFreshenVelocityMatrices(t0, yn, qn, qn, h, trial,
          ComputeAndFactorImplicitEulerIterationMatrix, iteration_matrix, Jy)) {
    return false;
  }

  // Do the Newton-Raphson iterations.
  for (int i = 0; i < this->max_newton_raphson_iterations(); ++i) {
    DRAKE_LOGGER_DEBUG(
        "VelocityImplicitEulerIntegrator::StepVelocityImplicitEuler() entered "
        "for t={}, h={}, trial={}", t0, h, trial);

    // In full-Newton mode, refresh the Jacobian and iteration matrix at the
    // current iterate before every iteration (no-op otherwise).
    this->FreshenVelocityMatricesIfFullNewton(
        tf, ytplus, qtplus, qn, h,
        ComputeAndFactorImplicitEulerIterationMatrix, iteration_matrix, Jy);

    // Update the number of Newton-Raphson iterations.
    ++num_nr_iterations_;

    // Evaluate the residual error, which is defined above as R(yₖ):
    // R(yₖ) = yₖ - yⁿ - h ℓ(yₖ).
    VectorX<T> residual = ComputeResidualR(tf, ytplus, qtplus, qn,
                                           yn, h, qdot_.get());

    // Compute the state update using the equation A*y = -R(), where A is the
    // iteration matrix.
    const VectorX<T> dy = iteration_matrix->Solve(-residual);

    // Update the y portion of xtplus to yₖ₊₁.
    ytplus += dy;

    // Update the q portion of xtplus to qₖ₊₁ = qⁿ + h N(qₖ) vₖ₊₁. Note that
    // currently, qtplus is set to qₖ, while vtplus is set to vₖ₊₁.
    // TODO(antequ): Optimize this so that the context doesn't invalidate the
    // position state cache an unnecessary number of times, because evaluating
    // N(q) does not set any cache.
    // TODO(antequ): Right now this invalidates the entire cache that depends
    // on any of the continuous state. Investigate invalidating less of the
    // cache once we have a Context method for modifying just the generalized
    // position.
    context->get_mutable_continuous_state()
        .get_mutable_generalized_position()
        .SetFromVector(qtplus);
    system.MapVelocityToQDot(*context, vtplus, qdot_.get());
    qtplus = qn + h * qdot_->get_value();

    // Assemble the full update [Δq; Δy] used for the convergence test.
    dx << qtplus - last_qtplus, dy;

    // Get the infinity norm of the weighted update vector.
    dx_state_->get_mutable_vector().SetFromVector(dx);
    // TODO(antequ): Replace this with CalcStateChangeNorm() when error
    // control has been implemented.
    // Get the norm of the update vector.
    T dx_norm = dx_state_->CopyToVector().norm();

    // Check for Newton-Raphson convergence.
    typename ImplicitIntegrator<T>::ConvergenceStatus status =
        this->CheckNewtonConvergence(i, *xtplus, dx, dx_norm, last_dx_norm);
    // If it converged, we're done.
    if (status == ImplicitIntegrator<T>::ConvergenceStatus::kConverged)
      return true;
    // If it diverged, we have to abort and try again.
    if (status == ImplicitIntegrator<T>::ConvergenceStatus::kDiverged)
      break;
    // Otherwise, continue to the next Newton-Raphson iteration.
    DRAKE_DEMAND(status ==
                 ImplicitIntegrator<T>::ConvergenceStatus::kNotConverged);

    last_dx_norm = dx_norm;
    last_qtplus = qtplus;
  }

  // NOTE(review): the two adjacent string literals below concatenate to
  // "...convergence failedfor t=..." — missing space; confirm and fix.
  DRAKE_LOGGER_DEBUG("Velocity-Implicit Euler integrator convergence failed"
      "for t={}, h={}, trial={}", t0, h, trial);

  // If Jacobian and iteration matrix factorizations are not reused, there
  // is nothing else we can try; otherwise, the following code will recurse
  // into this function again, and freshen computations as helpful. Note that
  // get_reuse() returns false if "full Newton-Raphson" mode is activated (see
  // ImplicitIntegrator::get_use_full_newton()).
  if (!this->get_reuse()) return false;

  // Try StepVelocityImplicitEuler() again. This method will
  // freshen Jacobians and iteration matrix factorizations as necessary.
  return StepVelocityImplicitEuler(t0, h, xn, xtplus_guess, xtplus,
                                   iteration_matrix, Jy, trial + 1);
}
template <class T>
bool VelocityImplicitEulerIntegrator<T>::StepHalfVelocityImplicitEulers(
    const T& t0, const T& h, const VectorX<T>& xn,
    const VectorX<T>& xtplus_guess, VectorX<T>* xtplus,
    typename ImplicitIntegrator<T>::IterationMatrix* iteration_matrix,
    MatrixX<T>* Jy) {
  // Takes two half-sized (h/2) velocity-implicit Euler steps from (t0, xn),
  // writing the final result into *xtplus; used to form the error estimate
  // against the single full-sized step. Work performed here is attributed to
  // the half-step statistics counters. Returns true only if both half steps
  // converge.
  DRAKE_LOGGER_DEBUG(
      "VelocityImplicitEulerIntegrator::StepHalfVelocityImplicitEulers(h={}, "
      "t={})", h, t0);

  // Store statistics before "error control". The difference between
  // the modified statistics and the stored statistics will be used to compute
  // the half-sized-step-specific statistics.
  const int64_t stored_num_jacobian_evaluations =
      this->get_num_jacobian_evaluations();
  const int64_t stored_num_iter_factorizations =
      this->get_num_iteration_matrix_factorizations();
  const int64_t stored_num_function_evaluations =
      this->get_num_derivative_evaluations();
  const int64_t stored_num_jacobian_function_evaluations =
      this->get_num_derivative_evaluations_for_jacobian();
  const int64_t stored_num_nr_iterations =
      this->get_num_newton_raphson_iterations();

  // We set our guess for the state after a half-step to the average of the
  // guess for the final state, xtplus_guess, and the initial state, xt0.
  VectorX<T> xtmp = 0.5 * (xn + xtplus_guess);
  const VectorX<T>& xthalf_guess = xtmp;
  bool success = StepVelocityImplicitEuler(t0, 0.5 * h, xn, xthalf_guess,
                                           xtplus, iteration_matrix, Jy);
  if (!success) {
    DRAKE_LOGGER_DEBUG("First Half VIE convergence failed.");
  } else {
    // Swap the current output, xtplus, into xthalf, which functions as the new
    // xⁿ. (Swapping reuses xtmp's storage, avoiding a heap allocation.)
    std::swap(xtmp, *xtplus);
    const VectorX<T>& xthalf = xtmp;
    success = StepVelocityImplicitEuler(t0 + 0.5 * h, 0.5 * h, xthalf,
                                        xtplus_guess, xtplus, iteration_matrix,
                                        Jy);
    if (!success) {
      DRAKE_LOGGER_DEBUG("Second Half VIE convergence failed.");
    }
  }

  // Move statistics into half-sized-steps-specific statistics.
  num_half_vie_jacobian_reforms_ +=
      this->get_num_jacobian_evaluations() - stored_num_jacobian_evaluations;
  num_half_vie_iter_factorizations_ +=
      this->get_num_iteration_matrix_factorizations() -
      stored_num_iter_factorizations;
  num_half_vie_function_evaluations_ +=
      this->get_num_derivative_evaluations() - stored_num_function_evaluations;
  num_half_vie_jacobian_function_evaluations_ +=
      this->get_num_derivative_evaluations_for_jacobian() -
      stored_num_jacobian_function_evaluations;
  num_half_vie_nr_iterations_ +=
      this->get_num_newton_raphson_iterations() - stored_num_nr_iterations;
  return success;
}
template <class T>
bool VelocityImplicitEulerIntegrator<T>::AttemptStepPaired(
    const T& t0, const T& h, const VectorX<T>& xn, VectorX<T>* xtplus_vie,
    VectorX<T>* xtplus_hvie) {
  // Takes the paired steps used for error estimation: one full-sized
  // velocity-implicit Euler step (into *xtplus_vie) followed by two half-sized
  // steps (into *xtplus_hvie). Returns true only if every step converges.
  DRAKE_LOGGER_DEBUG(
      "VelocityImplicitEulerIntegrator::AttemptStepPaired(h={}, "
      "t={})", h, t0);
  using std::abs;
  DRAKE_ASSERT(xtplus_vie != nullptr);
  DRAKE_ASSERT(xtplus_hvie != nullptr);

  // Use the current state as the candidate value for the next state.
  // [Hairer 1996] validates this choice (p. 120).
  const VectorX<T>& xtplus_guess = xn;

  // Take the single full-sized step first.
  const bool full_step_converged = StepVelocityImplicitEuler(
      t0, h, xn, xtplus_guess, xtplus_vie, &iteration_matrix_vie_, &Jy_vie_);
  if (!full_step_converged) {
    DRAKE_LOGGER_DEBUG(
        "Velocity-Implicit Euler full-step approach did not converge for "
        "step size {}", h);
    return false;
  }

  // Take the two half-sized steps, reusing the Jacobian and iteration matrix
  // from the big step because they work quite well, based on a few empirical
  // tests.
  const bool half_steps_converged = StepHalfVelocityImplicitEulers(
      t0, h, xn, *xtplus_vie, xtplus_hvie, &iteration_matrix_vie_, &Jy_vie_);
  if (!half_steps_converged) {
    DRAKE_LOGGER_DEBUG(
        "Velocity-Implicit Euler half-step approach failed with a step size "
        "that succeeded for the full step, {}", h);
    return false;
  }

  return true;
}
template <class T>
bool VelocityImplicitEulerIntegrator<T>::DoImplicitIntegratorStep(const T& h) {
  // Advances the state by one step of size h using the paired full-step /
  // two-half-step scheme for error estimation. If h is below the working
  // minimum step size, an explicit Euler step (with a matching two-half-step
  // error estimate) is substituted. Returns false — with time and state
  // restored to (t0, xn) — if the implicit scheme fails to converge.

  // Save the current time and state.
  Context<T>* context = this->get_mutable_context();
  const T t0 = context->get_time();
  DRAKE_LOGGER_DEBUG("VelocityImplicitEulerIntegrator::"
      "DoImplicitIntegratorStep(h={}) t={}", h, t0);

  xn_ = context->get_continuous_state().CopyToVector();
  xtplus_vie_.resize(xn_.size());
  xtplus_hvie_.resize(xn_.size());

  // (Re)allocate the qdot_ scratch vector on first use or if the number of
  // generalized positions has changed.
  int nq = context->get_continuous_state().num_q();
  if (qdot_ == nullptr || qdot_->size() != nq) {
    qdot_ = std::make_unique<BasicVector<T>>(nq);
  }

  // If the requested h is less than the minimum step size, we'll advance time
  // using an explicit Euler step.
  if (h < this->get_working_minimum_step_size()) {
    DRAKE_LOGGER_DEBUG(
        "-- requested step too small, taking explicit step instead, at t={}, "
        "h={}, minimum_h={}", t0, h, this->get_working_minimum_step_size());

    // The error estimation process for explicit Euler uses two half-sized
    // steps so that the order of the asymptotic term matches that used
    // for estimating the error of the velocity-implicit Euler integrator.

    // Compute the explicit Euler step.
    xdot_ = this->EvalTimeDerivatives(*context).CopyToVector();
    xtplus_vie_ = xn_ + h * xdot_;

    // Compute the two explicit Euler steps.
    xtplus_hvie_ = xn_ + 0.5 * h * xdot_;
    context->SetTimeAndContinuousState(t0 + 0.5 * h, xtplus_hvie_);
    xdot_ = this->EvalTimeDerivatives(*context).CopyToVector();
    xtplus_hvie_ += 0.5 * h * xdot_;
  } else {
    // Try taking the requested step.
    const bool success = AttemptStepPaired(t0, h, xn_, &xtplus_vie_,
                                           &xtplus_hvie_);

    // If the step was not successful, reset the time and state.
    if (!success) {
      DRAKE_LOGGER_DEBUG(
          "Velocity-Implicit Euler paired approach did not converge for "
          "time t={}, step size h={}", t0, h);
      context->SetTimeAndContinuousState(t0, xn_);
      return false;
    }
  }

  // Compute and update the error estimate. IntegratorBase will use the norm of
  // this vector to adjust step size.
  err_est_vec_ = (xtplus_vie_ - xtplus_hvie_);

  // Update the caller-accessible error estimate.
  this->get_mutable_error_estimate()->get_mutable_vector().SetFromVector(
      err_est_vec_);

  // Set the state to the computed state from the half-steps.
  context->SetTimeAndContinuousState(t0 + h, xtplus_hvie_);
  return true;
}
}  // namespace systems
}  // namespace drake

// Explicitly instantiate the integrator for the default nonsymbolic scalar
// types (per Drake's scalar-conversion machinery).
DRAKE_DEFINE_CLASS_TEMPLATE_INSTANTIATIONS_ON_DEFAULT_NONSYMBOLIC_SCALARS(
    class ::drake::systems::VelocityImplicitEulerIntegrator)
| 0 |
/home/johnshepherd/drake/systems | /home/johnshepherd/drake/systems/analysis/implicit_integrator.cc | #include "drake/systems/analysis/implicit_integrator.h"
#include <cmath>
#include <stdexcept>
#include "drake/common/autodiff.h"
#include "drake/common/drake_assert.h"
#include "drake/common/fmt_eigen.h"
#include "drake/common/text_logging.h"
#include "drake/math/autodiff_gradient.h"
namespace drake {
namespace systems {
template <class T>
void ImplicitIntegrator<T>::DoResetStatistics() {
  // Zero the statistics tracked at this level of the hierarchy. The three
  // resets are independent, so their order is immaterial.
  num_jacobian_evaluations_ = 0;
  num_jacobian_function_evaluations_ = 0;
  num_iter_factorizations_ = 0;
  // Let derived integrators reset their own statistics as well.
  DoResetImplicitIntegratorStatistics();
}
template <class T>
void ImplicitIntegrator<T>::DoReset() {
  // Resize the Jacobian to 0 × 0: an empty Jacobian is the signal (checked in
  // MaybeFreshenMatrices()) that it must be recomputed before reuse.
  J_.resize(0, 0);
  // Let the derived class drop any matrices it derives from the Jacobian.
  DoResetCachedJacobianRelatedMatrices();
  // Call any Reset() provided by child integrator classes.
  DoImplicitIntegratorReset();
}
template <class T>
void ImplicitIntegrator<T>::ComputeAutoDiffJacobian(
    const System<T>& system, const T& t, const VectorX<T>& xt,
    const Context<T>& context, MatrixX<T>* J) {
  // Computes J = ∂f/∂x at (t, xt) via automatic differentiation, writing an
  // n × n matrix into *J. `context` supplies the fixed inputs/parameters; it
  // is not modified (a scalar-converted copy is used instead).
  DRAKE_LOGGER_DEBUG(" ImplicitIntegrator Compute Autodiff Jacobian t={}", t);
  // TODO(antequ): Investigate how to refactor this method to use
  // math::jacobian(), if possible.

  // Create AutoDiff versions of the state vector.
  // Set the size of the derivatives and prepare for Jacobian calculation.
  VectorX<AutoDiffXd> a_xt = math::InitializeAutoDiff(xt);

  // Get the system and the context in AutoDiffable format. Inputs must also
  // be copied to the context used by the AutoDiff'd system (which is
  // accomplished using FixInputPortsFrom()).
  // TODO(edrumwri): Investigate means for moving as many of the operations
  //                 below offline (or with lower frequency than once-per-
  //                 Jacobian calculation) as is possible. These operations
  //                 are likely to be expensive.
  const auto adiff_system = system.ToAutoDiffXd();
  std::unique_ptr<Context<AutoDiffXd>> adiff_context = adiff_system->
      AllocateContext();
  adiff_context->SetTimeStateAndParametersFrom(context);
  adiff_system->FixInputPortsFrom(system, context, adiff_context.get());
  adiff_context->SetTime(t);

  // Set the continuous state in the context.
  adiff_context->SetContinuousState(a_xt);

  // Evaluate the derivatives at that state.
  const VectorX<AutoDiffXd> result =
      this->EvalTimeDerivatives(*adiff_system, *adiff_context).CopyToVector();

  // Extract the Jacobian (the gradient of each derivative w.r.t. the state).
  *J = math::ExtractGradient(result);

  // Sometimes the system's derivatives f(t, x) do not depend on its states, for
  // example, when f(t, x) = constant or when f(t, x) depends only on t. In this
  // case, make sure that the Jacobian isn't a n ✕ 0 matrix (this will cause a
  // segfault when forming Newton iteration matrices); if it is, we set it equal
  // to an n x n zero matrix.
  if (J->cols() == 0) {
    *J = MatrixX<T>::Zero(xt.size(), xt.size());
  }
}
template <class T>
void ImplicitIntegrator<T>::ComputeForwardDiffJacobian(
    const System<T>&, const T& t, const VectorX<T>& xt, Context<T>* context,
    MatrixX<T>* J) {
  // Computes the n × n Jacobian J = ∂f/∂x at (t, xt) by first-order forward
  // differencing: column i is (f(t, xt + dxᵢ·eᵢ) − f(t, xt)) / dxᵢ. Uses
  // n + 1 derivative evaluations. `context` is mutated; the caller (see
  // CalcJacobian()) is responsible for restoring it.
  using std::abs;

  // Set epsilon to the square root of machine precision.
  const double eps = std::sqrt(std::numeric_limits<double>::epsilon());

  // Get the number of continuous state variables xt.
  const int n = context->num_continuous_states();

  DRAKE_LOGGER_DEBUG(
      " ImplicitIntegrator Compute Forwarddiff {}-Jacobian t={}", n, t);
  DRAKE_LOGGER_DEBUG(
      " computing from state {}", fmt_eigen(xt.transpose()));

  // Initialize the Jacobian.
  J->resize(n, n);

  // Evaluate f(t,xt).
  context->SetTimeAndContinuousState(t, xt);
  const VectorX<T> f = this->EvalTimeDerivatives(*context).CopyToVector();

  // Compute the Jacobian.
  VectorX<T> xt_prime = xt;
  for (int i = 0; i < n; ++i) {
    // Compute a good increment to the dimension using approximately 1/eps
    // digits of precision. Note that if |xt| is large, the increment will
    // be large as well. If |xt| is small, the increment will be no smaller
    // than eps.
    const T abs_xi = abs(xt(i));
    T dxi(abs_xi);
    if (dxi <= 1) {
      // When |xt[i]| is small, increment will be eps.
      dxi = eps;
    } else {
      // |xt[i]| not small; make increment a fraction of |xt[i]|.
      dxi = eps * abs_xi;
    }

    // Update xt', minimizing the effect of roundoff error by ensuring that
    // x and dx differ by an exactly representable number. See p. 192 of
    // Press, W., Teukolsky, S., Vetterling, W., and Flannery, P. Numerical
    // Recipes in C++, 2nd Ed., Cambridge University Press, 2002.
    xt_prime(i) = xt(i) + dxi;
    dxi = xt_prime(i) - xt(i);

    // TODO(sherm1) This is invalidating q, v, and z but we only changed one.
    //              Switch to a method that invalides just the relevant
    //              partition, and ideally modify only the one changed element.
    // Compute f' and set the relevant column of the Jacobian matrix.
    context->SetTimeAndContinuousState(t, xt_prime);
    J->col(i) = (this->EvalTimeDerivatives(*context).CopyToVector() - f) / dxi;

    // Reset xt' to xt.
    xt_prime(i) = xt(i);
  }
}
template <class T>
void ImplicitIntegrator<T>::ComputeCentralDiffJacobian(
    const System<T>&, const T& t, const VectorX<T>& xt, Context<T>* context,
    MatrixX<T>* J) {
  // Computes the n × n Jacobian J = ∂f/∂x at (t, xt) by second-order central
  // differencing: column i is
  //   (f(t, xt + dxᵢ₊·eᵢ) − f(t, xt − dxᵢ₋·eᵢ)) / (dxᵢ₊ + dxᵢ₋).
  // More accurate, but costlier, than forward differencing. `context` is
  // mutated; the caller (see CalcJacobian()) is responsible for restoring it.
  using std::abs;

  // Cube root of machine precision (indicated by theory) seems a bit coarse.
  // Pick power of eps halfway between 6/12 (i.e., 1/2) and 4/12 (i.e., 1/3).
  const double eps = std::pow(std::numeric_limits<double>::epsilon(), 5.0/12);

  // Get the number of continuous state variables xt.
  const int n = context->num_continuous_states();

  DRAKE_LOGGER_DEBUG(
      " ImplicitIntegrator Compute Centraldiff {}-Jacobian t={}", n, t);

  // Initialize the Jacobian.
  J->resize(n, n);

  // Evaluate f(t,xt).
  // NOTE(review): `f` is not used by the central-difference formula below;
  // this evaluation appears to cost one extra derivative evaluation — confirm
  // whether it can be removed (removing it would also change the statistics).
  context->SetTimeAndContinuousState(t, xt);
  const VectorX<T> f = this->EvalTimeDerivatives(*context).CopyToVector();

  // Compute the Jacobian.
  VectorX<T> xt_prime = xt;
  for (int i = 0; i < n; ++i) {
    // Compute a good increment to the dimension using approximately 1/eps
    // digits of precision. Note that if |xt| is large, the increment will
    // be large as well. If |xt| is small, the increment will be no smaller
    // than eps.
    const T abs_xi = abs(xt(i));
    T dxi(abs_xi);
    if (dxi <= 1) {
      // When |xt[i]| is small, increment will be eps.
      dxi = eps;
    } else {
      // |xt[i]| not small; make increment a fraction of |xt[i]|.
      dxi = eps * abs_xi;
    }

    // Update xt', minimizing the effect of roundoff error, by ensuring that
    // x and dx differ by an exactly representable number. See p. 192 of
    // Press, W., Teukolsky, S., Vetterling, W., and Flannery, P. Numerical
    // Recipes in C++, 2nd Ed., Cambridge University Press, 2002.
    xt_prime(i) = xt(i) + dxi;
    const T dxi_plus = xt_prime(i) - xt(i);

    // TODO(sherm1) This is invalidating q, v, and z but we only changed one.
    //              Switch to a method that invalides just the relevant
    //              partition, and ideally modify only the one changed element.
    // Compute f(x+dx).
    context->SetContinuousState(xt_prime);
    VectorX<T> fprime_plus = this->EvalTimeDerivatives(*context).CopyToVector();

    // Update xt' again, minimizing the effect of roundoff error.
    xt_prime(i) = xt(i) - dxi;
    const T dxi_minus = xt(i) - xt_prime(i);

    // Compute f(x-dx).
    context->SetContinuousState(xt_prime);
    VectorX<T> fprime_minus = this->EvalTimeDerivatives(
        *context).CopyToVector();

    // Set the Jacobian column.
    J->col(i) = (fprime_plus - fprime_minus) / (dxi_plus + dxi_minus);

    // Reset xt' to xt.
    xt_prime(i) = xt(i);
  }
}
template <class T>
void ImplicitIntegrator<T>::IterationMatrix::SetAndFactorIterationMatrix(
    const MatrixX<T>& iteration_matrix) {
  // Computes and stores the LU factorization of `iteration_matrix` so that
  // subsequent Solve() calls can reuse it; records that a factorization is
  // now available (queried via matrix_factored()).
  LU_.compute(iteration_matrix);
  matrix_factored_ = true;
}
template <class T>
VectorX<T> ImplicitIntegrator<T>::IterationMatrix::Solve(
    const VectorX<T>& b) const {
  // Solves A·x = b using the LU factorization stored by
  // SetAndFactorIterationMatrix().
  const VectorX<T> x = LU_.solve(b);
  return x;
}
template <typename T>
typename ImplicitIntegrator<T>::ConvergenceStatus
ImplicitIntegrator<T>::CheckNewtonConvergence(
    int iteration, const VectorX<T>& xtplus, const VectorX<T>& dx,
    const T& dx_norm, const T& last_dx_norm) const {
  // Classifies the state of a Newton-Raphson iteration as converged, diverged,
  // or not-yet-converged, given the current iterate `xtplus`, the latest
  // update `dx` (with norm `dx_norm`), and the previous update's norm.

  // The check below looks for convergence by identifying cases where the
  // update to the state results in no change.
  // Note: Since we are performing this check at the end of the iteration,
  // after xtplus has been updated, we also know that there is at least some
  // change to the state, no matter how small, on a non-stationary system.
  // Future maintainers should make sure this check only occurs after a change
  // has been made to the state.
  if (this->IsUpdateZero(xtplus, dx)) {
    DRAKE_LOGGER_DEBUG("magnitude of state update indicates convergence");
    return ConvergenceStatus::kConverged;
  }

  // Compute the convergence rate and check convergence.
  // [Hairer, 1996] notes that this convergence strategy should only be applied
  // after *at least* two iterations (p. 121). In practice, we find that it
  // needs to run at least three iterations otherwise some error-controlled runs
  // may choke, hence we check if iteration > 1.
  if (iteration > 1) {
    // TODO(edrumwri) Hairer's RADAU5 implementation (allegedly) uses
    // theta = sqrt(dx[k] / dx[k-2]) while DASSL uses
    // theta = pow(dx[k] / dx[0], 1/k), so investigate setting
    // theta to these alternative values for minimizing convergence failures.
    const T theta = dx_norm / last_dx_norm;
    const T eta = theta / (1 - theta);
    DRAKE_LOGGER_DEBUG("Newton-Raphson loop {} theta: {}, eta: {}",
                       iteration, theta, eta);

    // Look for divergence.
    if (theta > 1) {
      DRAKE_LOGGER_DEBUG("Newton-Raphson divergence detected");
      return ConvergenceStatus::kDiverged;
    }

    // Look for convergence using Equation IV.8.10 from [Hairer, 1996].
    // [Hairer, 1996] determined values of kappa in [0.01, 0.1] work most
    // efficiently on a number of test problems with *Radau5* (a fifth order
    // implicit integrator), p. 121. We select a value halfway in-between.
    const double kappa = 0.05;
    const double k_dot_tol = kappa * this->get_accuracy_in_use();
    if (eta * dx_norm < k_dot_tol) {
      DRAKE_LOGGER_DEBUG("Newton-Raphson converged; η = {}", eta);
      return ConvergenceStatus::kConverged;
    }
  }

  return ConvergenceStatus::kNotConverged;
}
template <class T>
bool ImplicitIntegrator<T>::IsBadJacobian(const MatrixX<T>& J) const {
  // A Jacobian containing any NaN or ±infinity entry cannot be used to form
  // a Newton iteration matrix.
  const bool every_entry_finite = J.allFinite();
  return !every_entry_finite;
}
template <class T>
const MatrixX<T>& ImplicitIntegrator<T>::CalcJacobian(const T& t,
    const VectorX<T>& x) {
  // Computes J_ = ∂f/∂x about (t, x) using the configured computation scheme,
  // updates the Jacobian-related statistics, and marks the Jacobian "fresh".
  // The context is modified during the computation but restored on return.

  // We change the context but will change it back.
  Context<T>* context = this->get_mutable_context();

  // Get the current time and state so they can be restored below.
  const T t_current = context->get_time();
  const VectorX<T> x_current = context->get_continuous_state_vector().
      CopyToVector();

  // Update the time and state.
  context->SetTimeAndContinuousState(t, x);
  num_jacobian_evaluations_++;

  // Get the current number of ODE evaluations so that, below, we can compute
  // how many derivative evaluations the Jacobian computation consumed.
  int64_t current_ODE_evals = this->get_num_derivative_evaluations();

  // Get the system.
  const System<T>& system = this->get_system();

  // TODO(edrumwri): Give the caller the option to provide their own Jacobian.
  // (A plain switch suffices here; the original immediately-invoked lambda
  // added no value.)
  switch (jacobian_scheme_) {
    case JacobianComputationScheme::kForwardDifference:
      ComputeForwardDiffJacobian(system, t, x, context, &J_);
      break;
    case JacobianComputationScheme::kCentralDifference:
      ComputeCentralDiffJacobian(system, t, x, context, &J_);
      break;
    case JacobianComputationScheme::kAutomatic:
      ComputeAutoDiffJacobian(system, t, x, *context, &J_);
      break;
  }

  // Use the new number of ODE evaluations to determine the number of Jacobian
  // evaluations.
  num_jacobian_function_evaluations_ += this->get_num_derivative_evaluations()
      - current_ODE_evals;

  // Reset the time and state.
  context->SetTimeAndContinuousState(t_current, x_current);

  // Mark the Jacobian as fresh, so that we don't recompute it unnecessarily
  // during the step.
  jacobian_is_fresh_ = true;

  return J_;
}
template <class T>
void ImplicitIntegrator<T>::FreshenMatricesIfFullNewton(
    const T& t, const VectorX<T>& xt, const T& h,
    const std::function<void(const MatrixX<T>&, const T&,
                             typename ImplicitIntegrator<T>::IterationMatrix*)>&
        compute_and_factor_iteration_matrix,
    typename ImplicitIntegrator<T>::IterationMatrix* iteration_matrix) {
  // In full-Newton mode the Jacobian and iteration matrix are refreshed
  // before every Newton-Raphson iteration; otherwise this is a no-op.
  DRAKE_DEMAND(iteration_matrix != nullptr);
  if (get_use_full_newton()) {
    // Recompute the Jacobian about (t, xt) and refactor the iteration matrix.
    MatrixX<T>& jacobian = get_mutable_jacobian();
    jacobian = CalcJacobian(t, xt);
    ++num_iter_factorizations_;
    compute_and_factor_iteration_matrix(jacobian, h, iteration_matrix);
  }
}
template <class T>
bool ImplicitIntegrator<T>::MaybeFreshenMatrices(
    const T& t, const VectorX<T>& xt, const T& h, int trial,
    const std::function<void(const MatrixX<T>&, const T&,
                             typename ImplicitIntegrator<T>::IterationMatrix*)>&
        compute_and_factor_iteration_matrix,
    typename ImplicitIntegrator<T>::IterationMatrix* iteration_matrix) {
  // Computes the Jacobian and/or (re-)factors the iteration matrix as needed,
  // using an escalating sequence of strategies keyed by `trial` (1 = cheapest:
  // reuse everything; 4 = give up). Returns true if the caller may proceed
  // with a Newton-Raphson iteration, false if nothing further can be tried.

  // Compute the initial Jacobian and iteration matrices and factor them, if
  // necessary. (An empty Jacobian signals "never computed".)
  MatrixX<T>& J = get_mutable_jacobian();
  if (!get_reuse() || J.rows() == 0 || IsBadJacobian(J)) {
    J = CalcJacobian(t, xt);
    ++num_iter_factorizations_;
    compute_and_factor_iteration_matrix(J, h, iteration_matrix);
    return true;  // Indicate success.
  }

  // Reuse is activated, Jacobian is fully sized, and Jacobian is not "bad".
  // If the iteration matrix has not been set and factored, do only that.
  // In most cases, the iteration matrix is already factorized if the Jacobian
  // has been properly computed. However, one example where this code block
  // might be triggered would be if the child integrator uses the same Jacobian,
  // but two different iteration matrices, for two methods, such as implicit
  // Euler with implicit Trapezoid error estimation. During the first implicit
  // Euler step, the Jacobian is computed and the implicit Euler matrix is
  // factorized. Afterwards, during the first implicit Trapezoid step,
  // the Jacobian (which it shares with implicit Euler) is fresh, but the
  // implicit Trapezoid iteration matrix is not factorized, and so this block
  // of code will factorize it.
  if (!iteration_matrix->matrix_factored()) {
    ++num_iter_factorizations_;
    compute_and_factor_iteration_matrix(J, h, iteration_matrix);
    return true;  // Indicate success.
  }

  switch (trial) {
    case 1:
      // For the first trial, we do nothing: this will cause the Newton-Raphson
      // process to use the last computed (and already factored) iteration
      // matrix. This matrix may be from a previous time-step or a previously-
      // attempted step size.
      return true;  // Indicate success.

    case 2: {
      // For the second trial, we know the first trial, which uses the last
      // computed iteration matrix, has already failed. The last computed
      // iteration matrix may be from many time steps ago, or it may be from a
      // different step size. We perform the (likely) next least expensive
      // operation, which is re-constructing and factoring the iteration
      // matrix, using the last computed Jacobian. The last computed Jacobian
      // may also be from many time steps ago, or it may be from a previously-
      // attempted step size.
      // TODO(antequ): In two particular cases, this may compute the same
      // iteration matrix twice. Currently they are rare and unimportant, but
      // in the future, it may be worth it to investigate optimizing these two
      // cases if they make a performance difference:
      // 1. During the first time step of the simulation, trial 1 will compute
      // the initial iteration matrix, and trial 2 will compute the same
      // iteration matrix again if trial 1 fails.
      // 2. For implicit Euler with step doubling, it is possible that trial 3
      // gets triggered on the first small step, which then fails, and after the
      // step size is halved, trial 2 is triggered on the first large step,
      // which requires the same iteration matrix (so the matrix is correct
      // and does not actually need recomputation).
      // In both cases, the right thing to do would be to skip to trial 3.
      ++num_iter_factorizations_;
      compute_and_factor_iteration_matrix(J, h, iteration_matrix);
      return true;
    }

    case 3: {
      // For the third trial, we know that the first two trials, which
      // exhausted all our options short of recomputing the Jacobian, have
      // failed.
      // The Jacobian matrix may already be "fresh", meaning that there is
      // nothing more that can be tried (Jacobian and iteration matrix are both
      // fresh), and we need to indicate failure.
      if (jacobian_is_fresh_)
        return false;

      // Otherwise, we can reform the Jacobian matrix and refactor the
      // iteration matrix.
      J = CalcJacobian(t, xt);
      ++num_iter_factorizations_;
      compute_and_factor_iteration_matrix(J, h, iteration_matrix);
      return true;
    }

    case 4: {
      // Trial #4 indicates failure.
      return false;
    }

    default:
      throw std::domain_error("Unexpected trial number.");
  }
}
} // namespace systems
} // namespace drake
DRAKE_DEFINE_CLASS_TEMPLATE_INSTANTIATIONS_ON_DEFAULT_NONSYMBOLIC_SCALARS(
class drake::systems::ImplicitIntegrator)
| 0 |
/home/johnshepherd/drake/systems/analysis | /home/johnshepherd/drake/systems/analysis/test_utilities/spring_mass_system.cc | #include "drake/systems/analysis/test_utilities/spring_mass_system.h"
#include <utility>
#include "drake/common/default_scalars.h"
namespace drake {
namespace systems {
namespace {
constexpr int kStateSize = 3; // position, velocity, power integral
} // namespace
// Constructs the 3-element state [q, v, W_c]; the conservative-work
// integral W_c always starts at zero.
template <typename T>
SpringMassStateVector<T>::SpringMassStateVector(const T& initial_position,
                                                const T& initial_velocity)
    : BasicVector<T>(kStateSize) {
  set_position(initial_position);
  set_velocity(initial_velocity);
  set_conservative_work(0);
}

// Default state: at rest at the origin.
template <typename T>
SpringMassStateVector<T>::SpringMassStateVector()
    : SpringMassStateVector(0.0, 0.0) {}

template <typename T>
SpringMassStateVector<T>::~SpringMassStateVector() {}
// Order matters: Position (q) precedes velocity (v) precedes misc. (z) in
// ContinuousState.

// Position q lives at index 0.
template <typename T>
T SpringMassStateVector<T>::get_position() const {
  return this->GetAtIndex(0);
}

// Velocity v lives at index 1.
template <typename T>
T SpringMassStateVector<T>::get_velocity() const {
  return this->GetAtIndex(1);
}

// The integral of conservative power (misc. state z) lives at index 2.
template <typename T>
T SpringMassStateVector<T>::get_conservative_work() const {
  return this->GetAtIndex(2);
}

template <typename T>
void SpringMassStateVector<T>::set_position(const T& q) {
  this->SetAtIndex(0, q);
}

template <typename T>
void SpringMassStateVector<T>::set_velocity(const T& v) {
  this->SetAtIndex(1, v);
}

template <typename T>
void SpringMassStateVector<T>::set_conservative_work(const T& work) {
  this->SetAtIndex(2, work);
}
// Deep copy: reproduce q and v via the two-argument constructor, then carry
// over the accumulated conservative work (which that constructor zeroes).
template <typename T>
SpringMassStateVector<T>* SpringMassStateVector<T>::DoClone() const {
  auto* clone = new SpringMassStateVector<T>(get_position(), get_velocity());
  clone->set_conservative_work(get_conservative_work());
  return clone;
}
// Primary constructor. spring_constant_N_per_m and mass_kg parameterize the
// dynamics; system_is_forced controls whether a 1-d force input port exists.
template <typename T>
SpringMassSystem<T>::SpringMassSystem(
    SystemScalarConverter converter,
    double spring_constant_N_per_m,
    double mass_kg,
    bool system_is_forced)
    : LeafSystem<T>(std::move(converter)),
      spring_constant_N_per_m_(spring_constant_N_per_m),
      mass_kg_(mass_kg),
      system_is_forced_(system_is_forced) {
  // Declares input port for forcing term.
  if (system_is_forced_) {
    this->DeclareInputPort(kUseDefaultName, kVectorValued, 1);
  }

  // Declares output port for q, qdot, Energy.
  this->DeclareVectorOutputPort(kUseDefaultName, SpringMassStateVector<T>(),
                                &SpringMassSystem::SetOutputValues);

  // One generalized position, one generalized velocity, and one misc. state
  // (the conservative-work integral).
  this->DeclareContinuousState(SpringMassStateVector<T>(),
                               1 /* num_q */, 1 /* num_v */, 1 /* num_z */);
}
// Delegating constructor; enables scalar conversion by passing our
// SystemTypeTag to the primary constructor.
template <typename T>
SpringMassSystem<T>::SpringMassSystem(
    double spring_constant_N_per_m,
    double mass_kg,
    bool system_is_forced)
    : SpringMassSystem(
          SystemTypeTag<SpringMassSystem>{},
          spring_constant_N_per_m,
          mass_kg,
          system_is_forced) {}

// Scalar-converting copy constructor (e.g., double -> AutoDiffXd); copies
// only the construction parameters, not any runtime state.
template <typename T>
template <typename U>
SpringMassSystem<T>::SpringMassSystem(const SpringMassSystem<U>& other)
    : SpringMassSystem(
          other.get_spring_constant(),
          other.get_mass(),
          other.get_system_is_forced()) {}
// Returns the force input port; throws if this system was constructed
// without one (system_is_forced = false).
template <typename T>
const InputPort<T>& SpringMassSystem<T>::get_force_port() const {
  // Guard clause: the port only exists when the forcing term was declared.
  if (!system_is_forced_) {
    throw std::runtime_error(
        "Attempting to access input force port when this SpringMassSystem was "
        "instantiated with no input ports.");
  }
  return this->get_input_port(0);
}
// Hooke's law with rest length x0: f = -k * (x - x0).
template <typename T>
T SpringMassSystem<T>::EvalSpringForce(const Context<T>& context) const {
  const T& x = get_position(context);
  const T x0 = 0;  // TODO(david-german-tri) should be a parameter.
  return -spring_constant_N_per_m_ * (x - x0);
}
// Spring potential energy: pe = k * (x - x0)^2 / 2.
template <typename T>
T SpringMassSystem<T>::DoCalcPotentialEnergy(const Context<T>& context) const {
  const T& x = get_position(context);
  const T x0 = 0.;  // TODO(david-german-tri) should be a parameter.
  const T stretch = x - x0;
  return spring_constant_N_per_m_ * stretch * stretch / 2;
}
// Kinetic energy of the point mass: ke = m * v^2 / 2.
template <typename T>
T SpringMassSystem<T>::DoCalcKineticEnergy(const Context<T>& context) const {
  const T v = get_velocity(context);
  return mass_kg_ * v * v / 2;
}
// Power the spring delivers to the mass: f(t) * v(t).
template <typename T>
T SpringMassSystem<T>::DoCalcConservativePower(
    const Context<T>& context) const {
  return EvalSpringForce(context) * get_velocity(context);
}

// There are no non-conservative elements here (no damper, no friction), so
// no power is dissipated.
template <typename T>
T SpringMassSystem<T>::DoCalcNonConservativePower(const Context<T>&) const {
  return T(0.);
}
// Assign the state to the output. Only q and v are written; the output's
// conservative-work element is deliberately left untouched.
template <typename T>
void SpringMassSystem<T>::SetOutputValues(
    const Context<T>& context, SpringMassStateVector<T>* output_vector) const {
  const SpringMassStateVector<T>& state = get_state(context);
  output_vector->set_position(state.get_position());
  output_vector->set_velocity(state.get_velocity());
}

// Compute the actual physics.
template <typename T>
void SpringMassSystem<T>::DoCalcTimeDerivatives(
    const Context<T>& context,
    ContinuousState<T>* derivatives) const {
  // TODO(david-german-tri): Cache the output of this function.
  const SpringMassStateVector<T>& state = get_state(context);
  SpringMassStateVector<T>& derivative_vector = get_mutable_state(derivatives);

  // The derivative of position is velocity.
  derivative_vector.set_position(state.get_velocity());

  // Presumably zero when the system was constructed unforced -- behavior of
  // get_input_force() is defined elsewhere; confirm against the header.
  const T external_force = get_input_force(context);

  // By Newton's 2nd law, the derivative of velocity (acceleration) is f/m where
  // f is the force applied to the body by the spring, and m is the mass of the
  // body.
  const T force_applied_to_body = EvalSpringForce(context) + external_force;
  derivative_vector.set_velocity(force_applied_to_body / mass_kg_);

  // We are integrating conservative power to get the work done by conservative
  // force elements, that is, the net energy transferred between the spring and
  // the mass over time.
  derivative_vector.set_conservative_work(
      this->CalcConservativePower(context));
}
} // namespace systems
} // namespace drake
DRAKE_DEFINE_CLASS_TEMPLATE_INSTANTIATIONS_ON_DEFAULT_SCALARS(
class ::drake::systems::SpringMassStateVector)
DRAKE_DEFINE_CLASS_TEMPLATE_INSTANTIATIONS_ON_DEFAULT_SCALARS(
class ::drake::systems::SpringMassSystem)
| 0 |
/home/johnshepherd/drake/systems/analysis | /home/johnshepherd/drake/systems/analysis/test_utilities/robertson_system.h | #pragma once
#include <cmath>
#include "drake/systems/framework/leaf_system.h"
namespace drake {
namespace systems {
namespace analysis {
namespace test {
/// Robertson's stiff chemical reaction problem. This example is taken from
/// [Hairer, 1996] and is described in more detail in:
/// http://www.radford.edu/~thompson/vodef90web/problems/demosnodislin/Single/DemoRobertson/demorobertson.pdf
/// The original system is described in:
///
/// - [Robertson, 1966] H. H. Robertson. "The solution of a system of reaction
/// rate equations" in Numerical Analysis, An Introduction.
/// Pages 178-182. Academic Press, 1966.
template <class T>
class RobertsonSystem : public LeafSystem<T> {
 public:
  DRAKE_NO_COPY_NO_MOVE_NO_ASSIGN(RobertsonSystem)

  RobertsonSystem() {
    // State is the three species concentrations [y1, y2, y3].
    this->DeclareContinuousState(3);
  }

  void DoCalcTimeDerivatives(const Context<T>& context,
                             ContinuousState<T>* deriv) const override {
    // Get state.
    const T& y1 = context.get_continuous_state_vector().GetAtIndex(0);
    const T& y2 = context.get_continuous_state_vector().GetAtIndex(1);
    const T& y3 = context.get_continuous_state_vector().GetAtIndex(2);

    // Compute derivatives. The rate constants (0.04, 1e4, 3e7) span nine
    // orders of magnitude, which is what makes this problem stiff.
    T y1_prime = -0.04 * y1 + 1e4 * y2 * y3;
    T y2_prime = 0.04 * y1 - 1e4 * y2 * y3 - 3e7 * y2 * y2;
    T y3_prime = 3e7 * y2 * y2;

    // Set the derivatives.
    deriv->get_mutable_vector().SetAtIndex(0, y1_prime);
    deriv->get_mutable_vector().SetAtIndex(1, y2_prime);
    deriv->get_mutable_vector().SetAtIndex(2, y3_prime);
  }

  /// Sets the initial conditions for the Robertson system: y = [1, 0, 0].
  void SetDefaultState(
      const Context<T>& context, State<T>* state) const override {
    auto& xc = state->get_mutable_continuous_state().get_mutable_vector();
    xc.SetAtIndex(0, 1);
    xc.SetAtIndex(1, 0);
    xc.SetAtIndex(2, 0);
  }

  /// Gets the end time for integration.
  T get_end_time() const { return 1e11; }

  /// Gets the system solution. Only works for time 10^11 (enforced by the
  /// DRAKE_DEMAND below).
  static Vector3<T> GetSolution(const T& t) {
    DRAKE_DEMAND(t == 1e11);
    Vector3<T> sol;
    sol(0) = 0.208334014970122e-7;
    sol(1) = 0.8333360770334713e-13;
    sol(2) = 0.9999999791665050;
    return sol;
  }
};
} // namespace test
} // namespace analysis
} // namespace systems
} // namespace drake
| 0 |
/home/johnshepherd/drake/systems/analysis | /home/johnshepherd/drake/systems/analysis/test_utilities/logistic_system.h | #pragma once
#include <cmath>
#include <memory>
#include <utility>
#include <vector>
#include "drake/systems/framework/leaf_system.h"
#include "drake/systems/framework/witness_function.h"
namespace drake {
namespace systems {
namespace analysis_test {
/// System with state evolution yielding a logistic function, for purposes of
/// witness function testing using the differential equation
/// dx/dt = α⋅(1 - (x/k)^ν)⋅t, where ν > 0 (affects the shape of the curve),
/// α > 0 (growth rate), and k is the upper asymptote.
template <class T>
class LogisticSystem : public LeafSystem<T> {
 public:
  DRAKE_NO_COPY_NO_MOVE_NO_ASSIGN(LogisticSystem)

  LogisticSystem(double k, double alpha, double nu)
      : k_(k), alpha_(alpha), nu_(nu) {
    this->DeclareContinuousState(1);
    // The witness fires whenever the scalar state crosses zero (either
    // direction) and responds by invoking this system's publish callback.
    witness_ = this->MakeWitnessFunction(
        "Logistic witness", WitnessFunctionDirection::kCrossesZero,
        &LogisticSystem::GetStateValue, &LogisticSystem::InvokePublishCallback);
  }

  // Registers a callback to run whenever the witness triggers a publish.
  void set_publish_callback(
      std::function<void(const Context<double>&)> callback) {
    publish_callback_ = callback;
  }

 protected:
  // Implements dx/dt = α⋅(1 - (x/k)^ν)⋅t (see class comment).
  void DoCalcTimeDerivatives(const systems::Context<T>& context,
      systems::ContinuousState<T>* continuous_state) const override {
    using std::pow;

    // Get the current time.
    const T& t = context.get_time();

    // Get state.
    const T& x = context.get_continuous_state()[0];

    // Compute the derivative.
    (*continuous_state)[0] = alpha_ * (1 - pow(x/k_, nu_)) * t;
  }

  void DoGetWitnessFunctions(
      const systems::Context<T>&,
      std::vector<const systems::WitnessFunction<T>*>* w) const override {
    w->push_back(witness_.get());
  }

  // Forwards the publish event to the user-supplied callback, if any.
  void InvokePublishCallback(const Context<T>& context,
                             const PublishEvent<T>&) const {
    if (this->publish_callback_ != nullptr) this->publish_callback_(context);
  }

 private:
  std::unique_ptr<WitnessFunction<T>> witness_;
  std::function<void(const Context<double>&)> publish_callback_{nullptr};

  // The witness value: simply the scalar state itself.
  T GetStateValue(const Context<T>& context) const {
    return context.get_continuous_state()[0];
  }

  // The upper asymptote on the logistic function.
  double k_{1.0};

  // The rate (> 0) at which the logistic function approaches the asymptote.
  double alpha_{1.0};

  // Parameter (> 0) that affects near which asymptote maximum growth occurs.
  double nu_{1.0};
};
} // namespace analysis_test
} // namespace systems
} // namespace drake
| 0 |
/home/johnshepherd/drake/systems/analysis | /home/johnshepherd/drake/systems/analysis/test_utilities/linear_scalar_system.h | #pragma once
#include "drake/systems/framework/context.h"
#include "drake/systems/framework/leaf_system.h"
namespace drake {
namespace systems {
namespace analysis_test {
/// System where the state at (scalar) time t corresponds to the linear equation
/// St + 3, where S is 4 by default.
class LinearScalarSystem : public LeafSystem<double> {
 public:
  // S is the constant slope of the solution x(t) = S*t + 3.
  explicit LinearScalarSystem(double S = 4.0) : S_(S) {
    this->DeclareContinuousState(1);
  }

  // Evaluates the system at time t.
  double Evaluate(double t) const {
    return 3 + S_ * t;
  }

 private:
  // Initializes the state to the closed-form solution at t = 0, so the
  // integrated trajectory can be compared against Evaluate(t).
  void SetDefaultState(
      const Context<double>& context, State<double>* state) const final {
    const double t0 = 0.0;
    state->get_mutable_continuous_state().get_mutable_vector()[0] =
        Evaluate(t0);
  }

  // dx/dt = S, the time derivative of Evaluate().
  void DoCalcTimeDerivatives(
      const Context<double>&,
      ContinuousState<double>* deriv) const override {
    (*deriv)[0] = S_;
  }

  double S_{};
};
} // namespace analysis_test
} // namespace systems
} // namespace drake
| 0 |
/home/johnshepherd/drake/systems/analysis | /home/johnshepherd/drake/systems/analysis/test_utilities/BUILD.bazel | load("//tools/lint:lint.bzl", "add_lint_tests")
load(
"//tools/skylark:drake_cc.bzl",
"drake_cc_googletest",
"drake_cc_library",
"drake_cc_package_library",
)
package(default_visibility = ["//visibility:public"])
drake_cc_package_library(
name = "test_utilities",
testonly = 1,
visibility = ["//visibility:public"],
deps = [
":controlled_spring_mass_system",
":cubic_scalar_system",
":discontinuous_spring_mass_damper_system",
":explicit_error_controlled_integrator_test",
":generic_integrator_test",
":implicit_integrator_test",
":linear_scalar_system",
":logistic_system",
":my_spring_mass_system",
":pleides_system",
":quadratic_scalar_system",
":quartic_scalar_system",
":quintic_scalar_system",
":robertson_system",
":spring_mass_damper_system",
":spring_mass_system",
":stateless_system",
":stationary_system",
":stiff_double_mass_spring_system",
],
)
drake_cc_library(
name = "controlled_spring_mass_system",
testonly = 1,
srcs = ["controlled_spring_mass_system.cc"],
hdrs = ["controlled_spring_mass_system.h"],
deps = [
":spring_mass_system",
"//systems/controllers:pid_controller",
"//systems/framework",
"//systems/primitives:adder",
"//systems/primitives:constant_vector_source",
"//systems/primitives:demultiplexer",
"//systems/primitives:gain",
"//systems/primitives:multiplexer",
],
)
drake_cc_library(
name = "cubic_scalar_system",
testonly = 1,
hdrs = ["cubic_scalar_system.h"],
deps = [
"//systems/framework",
],
)
drake_cc_library(
name = "discontinuous_spring_mass_damper_system",
testonly = 1,
hdrs = ["discontinuous_spring_mass_damper_system.h"],
deps = [
":spring_mass_damper_system",
],
)
drake_cc_library(
name = "explicit_error_controlled_integrator_test",
testonly = 1,
hdrs = ["explicit_error_controlled_integrator_test.h"],
deps = [
":my_spring_mass_system",
"//common/test_utilities:expect_no_throw",
],
)
drake_cc_library(
name = "generic_integrator_test",
testonly = 1,
hdrs = ["generic_integrator_test.h"],
deps = [
"//multibody/plant",
],
)
drake_cc_library(
name = "implicit_integrator_test",
testonly = 1,
hdrs = ["implicit_integrator_test.h"],
deps = [
":discontinuous_spring_mass_damper_system",
":linear_scalar_system",
":my_spring_mass_system",
":robertson_system",
":stationary_system",
":stiff_double_mass_spring_system",
"//common/test_utilities:expect_no_throw",
],
)
drake_cc_library(
name = "linear_scalar_system",
testonly = 1,
hdrs = ["linear_scalar_system.h"],
deps = [
"//systems/framework",
],
)
drake_cc_library(
name = "logistic_system",
testonly = 1,
hdrs = ["logistic_system.h"],
deps = [
"//systems/framework",
],
)
drake_cc_library(
name = "my_spring_mass_system",
testonly = 1,
hdrs = ["my_spring_mass_system.h"],
deps = [
":spring_mass_system",
],
)
drake_cc_library(
name = "pleides_system",
testonly = 1,
hdrs = ["pleides_system.h"],
deps = [
"//systems/framework:leaf_system",
],
)
drake_cc_library(
name = "quadratic_scalar_system",
testonly = 1,
hdrs = ["quadratic_scalar_system.h"],
deps = [
"//systems/framework",
],
)
drake_cc_library(
name = "quartic_scalar_system",
testonly = 1,
hdrs = ["quartic_scalar_system.h"],
deps = [
"//systems/framework",
],
)
drake_cc_library(
name = "quintic_scalar_system",
testonly = 1,
hdrs = ["quintic_scalar_system.h"],
deps = [
"//systems/framework",
],
)
drake_cc_library(
    name = "robertson_system",
    testonly = 1,
    hdrs = ["robertson_system.h"],
    # NOTE(review): robertson_system.h includes
    # "drake/systems/framework/leaf_system.h", yet no deps are declared here
    # (compare ":linear_scalar_system", which depends on "//systems/framework").
    # Presumably consumers supply the framework dependency themselves --
    # confirm this is intentional.
    deps = [],
)
drake_cc_library(
name = "spring_mass_damper_system",
testonly = 1,
hdrs = ["spring_mass_damper_system.h"],
deps = [
":spring_mass_system",
],
)
drake_cc_library(
name = "spring_mass_system",
testonly = 1,
srcs = ["spring_mass_system.cc"],
hdrs = ["spring_mass_system.h"],
deps = [
"//systems/framework:leaf_system",
],
)
drake_cc_googletest(
name = "spring_mass_system_test",
size = "medium",
deps = [
":spring_mass_system",
"//common/test_utilities",
"//systems/framework/test_utilities",
],
)
drake_cc_library(
name = "stateless_system",
testonly = 1,
hdrs = ["stateless_system.h"],
deps = [
"//systems/framework",
],
)
drake_cc_library(
name = "stationary_system",
testonly = 1,
srcs = ["stationary_system.cc"],
hdrs = ["stationary_system.h"],
deps = [
"//common:default_scalars",
"//systems/framework",
],
)
drake_cc_library(
name = "stiff_double_mass_spring_system",
testonly = 1,
hdrs = ["stiff_double_mass_spring_system.h"],
deps = [],
)
drake_cc_googletest(
name = "controlled_spring_mass_system_test",
deps = [
":controlled_spring_mass_system",
],
)
add_lint_tests(enable_clang_format_lint = False)
| 0 |
/home/johnshepherd/drake/systems/analysis | /home/johnshepherd/drake/systems/analysis/test_utilities/generic_integrator_test.h | #pragma once
#include <memory>
#include <gtest/gtest.h>
#include "drake/multibody/plant/multibody_plant.h"
namespace drake {
namespace systems {
namespace analysis_test {
// T is the integrator type (e.g., RungeKutta3Integrator<double>).
template <class T>
struct GenericIntegratorTest : public ::testing::Test {
 public:
  // Builds a plant containing a single floating rigid body ("Ball"), creates
  // a context with nonzero initial conditions, and wires up the integrator
  // under test.
  void SetUp() {
    plant_ = std::make_unique<multibody::MultibodyPlant<double>>(0.0);

    // Add a single free body to the world.
    const double radius = 0.05;   // m
    const double mass = 0.1;      // kg
    multibody::SpatialInertia<double> M_BBcm =
        multibody::SpatialInertia<double>::SolidSphereWithMass(mass, radius);
    plant_->AddRigidBody("Ball", M_BBcm);
    plant_->Finalize();

    context_ = MakePlantContext();
    integrator_ = std::make_unique<T>(*plant_, context_.get());
  }

  // Returns a plant context with arbitrary but nonzero initial conditions:
  // the body is translated, rotated 90 degrees about y, and given both
  // linear and angular velocity.
  std::unique_ptr<Context<double>> MakePlantContext() const {
    std::unique_ptr<Context<double>> context = plant_->CreateDefaultContext();

    // Set body linear and angular velocity.
    Vector3<double> v0(1., 2., 3.);    // Linear velocity in body's frame.
    Vector3<double> w0(-4., 5., -6.);  // Angular velocity in body's frame.
    VectorX<double> generalized_velocities(6);
    generalized_velocities << w0, v0;
    plant_->SetVelocities(context.get(), generalized_velocities);

    // Set body position and orientation.
    Vector3<double> p0(1., 2., 3.);  // Body's frame position in the world.
    // Set body's frame orientation to 90 degree rotation about y-axis.
    Vector4<double> q0(std::sqrt(2.) / 2., 0., std::sqrt(2.) / 2., 0.);
    VectorX<double> generalized_positions(7);
    generalized_positions << q0, p0;
    plant_->SetPositions(context.get(), generalized_positions);

    return context;
  }

  std::unique_ptr<multibody::MultibodyPlant<double>> plant_{};
  std::unique_ptr<Context<double>> context_;
  std::unique_ptr<T> integrator_;
};
TYPED_TEST_SUITE_P(GenericIntegratorTest);
// Verifies that the dense output is working for an integrator.
TYPED_TEST_P(GenericIntegratorTest, DenseOutput) {
  this->integrator_->set_maximum_step_size(0.1);
  // An accuracy that should be achievable with all integrators.
  this->integrator_->set_target_accuracy(1e-5);
  this->integrator_->Initialize();

  // Start a dense integration i.e. one that generates a dense
  // output for the state function.
  this->integrator_->StartDenseIntegration();

  const double t_final = 1.0;
  // Arbitrary step, valid as long as it doesn't match the same
  // steps taken by the integrator. Otherwise, dense output accuracy
  // would not be checked.
  const double h = 0.01;
  const int n_steps = (t_final / h);
  for (int i = 1; i <= n_steps; ++i) {
    // Integrate the whole step.
    this->integrator_->IntegrateWithMultipleStepsToTime(i * h);

    // Check solution: the dense output evaluated at the current time must
    // match the plant state to within the integrator's working accuracy.
    EXPECT_TRUE(CompareMatrices(
        this->integrator_->get_dense_output()->value(
            this->context_->get_time()),
        this->plant_->GetPositionsAndVelocities(*this->context_),
        this->integrator_->get_accuracy_in_use(), MatrixCompareType::relative));
  }

  // Stop undergoing dense integration.
  std::unique_ptr<trajectories::PiecewisePolynomial<double>> dense_output =
      this->integrator_->StopDenseIntegration();
  EXPECT_FALSE(this->integrator_->get_dense_output());

  // Integrate one more step.
  this->integrator_->IntegrateWithMultipleStepsToTime(t_final + h);

  // Verify that the dense output was not updated.
  EXPECT_LT(dense_output->end_time(), this->context_->get_time());
}
// Confirm that integration supports times < 0.
TYPED_TEST_P(GenericIntegratorTest, NegativeTime) {
  this->integrator_->set_maximum_step_size(0.1);
  this->integrator_->set_target_accuracy(1e-5);
  this->integrator_->Initialize();
  // Integrate from t = -1 to t = -0.5; only the reached final time is
  // checked, not the solution accuracy.
  this->context_->SetTime(-1.0);
  this->integrator_->IntegrateWithMultipleStepsToTime(-0.5);
  EXPECT_EQ(this->context_->get_time(), -0.5);
}
REGISTER_TYPED_TEST_SUITE_P(GenericIntegratorTest, DenseOutput, NegativeTime);
} // namespace analysis_test
} // namespace systems
} // namespace drake
| 0 |
/home/johnshepherd/drake/systems/analysis | /home/johnshepherd/drake/systems/analysis/test_utilities/quartic_scalar_system.h | #pragma once
#include "drake/systems/framework/context.h"
#include "drake/systems/framework/leaf_system.h"
#include "drake/systems/framework/state.h"
namespace drake {
namespace systems {
namespace analysis_test {
/// System where the state at (scalar) time t corresponds to the quartic
/// equation t⁴ + 2t³ + 3t² + 4t + 5.
class QuarticScalarSystem : public LeafSystem<double> {
 public:
  QuarticScalarSystem() { this->DeclareContinuousState(1); }

  /// Evaluates the system at time t.
  double Evaluate(double t) const {
    // Horner's form of t⁴ + 2t³ + 3t² + 4t + 5.
    return t * (t * (t * (t + 2) + 3) + 4) + 5;
  }

 private:
  // Initializes the scalar state to the closed-form solution at t = 0, so
  // the integrated trajectory can be compared against Evaluate(t).
  void SetDefaultState(const Context<double>& context,
                       State<double>* state) const final {
    const double t0 = 0.0;
    state->get_mutable_continuous_state().get_mutable_vector()[0] =
        Evaluate(t0);
  }

  // dx/dt is the derivative of Evaluate(): 4t³ + 6t² + 6t + 4, in
  // Horner's form.
  void DoCalcTimeDerivatives(const Context<double>& context,
                             ContinuousState<double>* deriv) const override {
    const double t = context.get_time();
    (*deriv)[0] = t * (t * (4*t + 6) + 6) + 4;
  }
};
} // namespace analysis_test
} // namespace systems
} // namespace drake
| 0 |
/home/johnshepherd/drake/systems/analysis | /home/johnshepherd/drake/systems/analysis/test_utilities/my_spring_mass_system.h | #pragma once
#include <limits>
#include <memory>
#include <vector>
#include "drake/common/drake_copyable.h"
#include "drake/systems/analysis/test_utilities/spring_mass_system.h"
namespace drake {
namespace systems {
namespace analysis_test {
template <class T>
class MySpringMassSystem : public SpringMassSystem<T> {
 public:
  DRAKE_NO_COPY_NO_MOVE_NO_ASSIGN(MySpringMassSystem)

  // Pass through to SpringMassSystem, except add events and handlers.
  // A non-positive update_rate means no periodic discrete update is declared.
  MySpringMassSystem(double stiffness, double mass, double update_rate)
      : SpringMassSystem<T>(stiffness, mass, false /*no input force*/) {
    // This forced-publish event is necessary for any simulator_test case that
    // needs to verify that the publish_every_time_step feature works.
    this->DeclareForcedPublishEvent(&MySpringMassSystem::CountPublishes);

    if (update_rate > 0.0) {
      this->DeclarePeriodicDiscreteUpdateEvent(1.0 / update_rate, 0.0,
          &MySpringMassSystem::CountDiscreteUpdates);
    }
  }

  // Returns the number of publish events handled so far.
  int get_publish_count() const { return publish_count_; }

  // Returns the number of discrete update events handled so far.
  int get_update_count() const { return update_count_; }

 private:
  EventStatus CountPublishes(const Context<T>&) const {
    ++publish_count_;
    return EventStatus::Succeeded();
  }

  // The discrete equation update here is for the special case of zero
  // discrete variables- in other words, this is just a counter.
  EventStatus CountDiscreteUpdates(const Context<T>&,
                                   DiscreteValues<T>*) const {
    ++update_count_;
    return EventStatus::Succeeded();
  }

  // Mutable so the const event handlers above can increment them.
  mutable int publish_count_{0};
  mutable int update_count_{0};
};  // MySpringMassSystem
} // namespace analysis_test
} // namespace systems
} // namespace drake
| 0 |
/home/johnshepherd/drake/systems/analysis | /home/johnshepherd/drake/systems/analysis/test_utilities/implicit_integrator_test.h | #pragma once
#include <limits>
#include <memory>
#include <gtest/gtest.h>
#include "drake/common/test_utilities/expect_no_throw.h"
#include "drake/systems/analysis/implicit_integrator.h"
#include "drake/systems/analysis/test_utilities/discontinuous_spring_mass_damper_system.h"
#include "drake/systems/analysis/test_utilities/linear_scalar_system.h"
#include "drake/systems/analysis/test_utilities/robertson_system.h"
#include "drake/systems/analysis/test_utilities/spring_mass_damper_system.h"
#include "drake/systems/analysis/test_utilities/spring_mass_system.h"
#include "drake/systems/analysis/test_utilities/stationary_system.h"
#include "drake/systems/analysis/test_utilities/stiff_double_mass_spring_system.h"
namespace drake {
namespace systems {
// Forward declare VelocityImplicitEulerIntegrator for the Reuse test
// so that we can check the integrator type, because the test needs to be
// different for the VIE integrator, since it uses slightly different logic.
template <class T>
class VelocityImplicitEulerIntegrator;
namespace analysis_test {
enum ReuseType { kNoReuse, kReuse };
template <typename IntegratorType>
class ImplicitIntegratorTest : public ::testing::Test {
public:
  // Sets up the four test systems (spring-mass, stiff spring-mass-damper,
  // critically damped discontinuous spring-mass-damper, and stiff double
  // spring-mass) along with a default context for each.
  ImplicitIntegratorTest() {
    // Create the spring-mass systems.
    spring_mass_ = std::make_unique<SpringMassSystem<double>>(
        spring_k_, mass_, false /* no forcing */);

    spring_mass_damper_ = std::make_unique<
        implicit_integrator_test::SpringMassDamperSystem<double>>(
        stiff_spring_k_, stiff_damping_b_, mass_);

    // The discontinuous spring-mass-damper is critically damped.
    mod_spring_mass_damper_ = std::make_unique<
        implicit_integrator_test::DiscontinuousSpringMassDamperSystem<double>>(
        semistiff_spring_k_, std::sqrt(semistiff_spring_k_ / mass_), mass_,
        constant_force_mag_);

    stiff_double_system_ =
        std::make_unique<analysis::test::StiffDoubleMassSpringSystem<double>>();

    // Contexts for single mass systems.
    spring_mass_context_ = spring_mass_->CreateDefaultContext();
    spring_mass_damper_context_ = spring_mass_damper_->CreateDefaultContext();
    mod_spring_mass_damper_context_ =
        mod_spring_mass_damper_->CreateDefaultContext();

    // Separate context necessary for the double spring mass system.
    dspring_context_ = stiff_double_system_->CreateDefaultContext();
  }
  // Exercises miscellaneous integrator API: reuse-flag round-trip,
  // Initialize() preconditions, the default Jacobian scheme, and the
  // working-accuracy behavior for an over-loose target accuracy.
  void MiscAPITest(ReuseType type) {
    // Create the integrator for a System<double>.
    IntegratorType integrator(*spring_mass_, spring_mass_context_.get());

    // Verifies set_reuse(flag) == get_reuse() == flag
    integrator.set_reuse(reuse_type_to_bool(type));
    EXPECT_EQ(integrator.get_reuse(), reuse_type_to_bool(type));

    // Verifies that calling Initialize without setting step size target or
    // maximum step size throws exception.
    EXPECT_THROW(integrator.Initialize(), std::logic_error);

    // Verify defaults match documentation.
    EXPECT_EQ(integrator.get_jacobian_computation_scheme(),
              IntegratorType::JacobianComputationScheme::kForwardDifference);

    // Test that setting the target accuracy and initial step size target is
    // successful.
    integrator.set_maximum_step_size(h_);
    if (integrator.supports_error_estimation()) {
      integrator.set_target_accuracy(1.0);
      integrator.request_initial_step_size_target(h_);
    }
    integrator.Initialize();

    // Verifies that setting accuracy too loose (from above) makes the working
    // accuracy different than the target accuracy after initialization.
    if (integrator.supports_error_estimation()) {
      EXPECT_NE(integrator.get_accuracy_in_use(),
                integrator.get_target_accuracy());
    } else {
      EXPECT_TRUE(std::isnan(integrator.get_target_accuracy()));
    }
  }
// Solve a stiff double spring-mass damper. This system has a very stiff
// spring and damper connecting two point masses together, and one of the
// point masses is connected to "the world" using a spring with no damper. The
// solution of this system should approximate the solution of an undamped
// spring connected to a mass equal to the sum of both point masses.
  void DoubleSpringMassDamperTest(ReuseType type) {
    // Clone the spring mass system's state.
    std::unique_ptr<State<double>> state_copy = dspring_context_->CloneState();

    // Set integrator parameters.
    IntegratorType integrator(*stiff_double_system_, dspring_context_.get());

    // For fixed step integrators, we need to use a smaller step size to get
    // the desired accuracy. By experimentation, we found that 0.1 h_ works.
    double h = integrator.supports_error_estimation() ? large_h_ : 0.1 * h_;

    // Designate the solution tolerance. For reference, the true positions are
    // about 0.4351 and 1.4351.
    const double sol_tol_pos = 2e-2;

    // The velocity solution needs a looser tolerance in Radau1 and Implicit
    // Euler. For reference, the true velocity is about -4.772.
    const double sol_tol_vel = 1.2e-1;

    integrator.set_maximum_step_size(h);
    if (integrator.supports_error_estimation()) {
      integrator.request_initial_step_size_target(h);
      integrator.set_target_accuracy(1e-5);
    }
    integrator.set_reuse(reuse_type_to_bool(type));

    // Get the solution at the target time; state_copy holds the closed-form
    // reference solution while dspring_context_ is integrated numerically.
    const double t_final = 1.0;
    stiff_double_system_->GetSolution(
        *dspring_context_, t_final,
        &state_copy->get_mutable_continuous_state());

    // Take all the defaults.
    integrator.Initialize();

    // Integrate.
    integrator.IntegrateWithMultipleStepsToTime(t_final);

    // Check the position solution (numeric vs. reference, element-wise).
    const VectorX<double> nsol = dspring_context_->get_continuous_state()
                                     .get_generalized_position()
                                     .CopyToVector();
    const VectorX<double> sol = state_copy->get_continuous_state()
                                    .get_generalized_position()
                                    .CopyToVector();

    for (int i = 0; i < nsol.size(); ++i)
      EXPECT_NEAR(sol(i), nsol(i), sol_tol_pos);

    // Check the velocity solution.
    const VectorX<double> nsolv = dspring_context_->get_continuous_state()
                                      .get_generalized_velocity()
                                      .CopyToVector();
    const VectorX<double> solv = state_copy->get_continuous_state()
                                     .get_generalized_velocity()
                                     .CopyToVector();
    for (int i = 0; i < nsolv.size(); ++i)
      EXPECT_NEAR(solv(i), nsolv(i), sol_tol_vel);

    // Verify that integrator statistics are valid.
    CheckGeneralStatsValidity(&integrator);
  }
// Integrate the mass-spring-damping system using huge stiffness and damping.
// This equation should be stiff.
// @param type whether Jacobian/iteration-matrix reuse is enabled on the
// integrator (converted via reuse_type_to_bool()). The same problem is
// integrated three times: with forward, central, and automatic
// differentiation Jacobian schemes.
void SpringMassDamperStiffTest(ReuseType type) {
// Create the integrator.
IntegratorType integrator(*spring_mass_damper_,
spring_mass_damper_context_.get());
integrator.set_maximum_step_size(large_h_);
integrator.set_requested_minimum_step_size(10 * small_h_);
integrator.set_throw_on_minimum_step_size_violation(false);
integrator.set_reuse(reuse_type_to_bool(type));
// Set error controlled integration parameters.
const double xtol = 1e-6;
// Velocity tolerance is looser than the position tolerance.
const double vtol = xtol * 100;
if (integrator.supports_error_estimation()) {
integrator.set_target_accuracy(xtol);
}
// Set the initial position and initial velocity.
const double initial_position = 1;
const double initial_velocity = 0.1;
// Set initial condition.
spring_mass_damper_->set_position(spring_mass_damper_context_.get(),
initial_position);
spring_mass_damper_->set_velocity(spring_mass_damper_context_.get(),
initial_velocity);
// Take all the defaults.
integrator.Initialize();
// Integrate for sufficient time for the spring to go to rest.
const double ttol = 1e2 * std::numeric_limits<double>::epsilon();
const double t_final = 2.0;
integrator.IntegrateWithMultipleStepsToTime(t_final);
// Check the time.
EXPECT_NEAR(spring_mass_damper_context_->get_time(), t_final, ttol);
// Get the final position and velocity.
const VectorBase<double>& xc_final =
spring_mass_damper_context_->get_continuous_state().get_vector();
double x_final = xc_final.GetAtIndex(0);
double v_final = xc_final.GetAtIndex(1);
// Get the closed form solution.
double x_final_true, v_final_true;
spring_mass_damper_->GetClosedFormSolution(initial_position,
initial_velocity, t_final,
&x_final_true, &v_final_true);
// Check the solution.
EXPECT_NEAR(x_final_true, x_final, xtol);
EXPECT_NEAR(v_final_true, v_final, vtol);
// Verify that integrator statistics are valid, and reset the statistics.
CheckGeneralStatsValidity(&integrator);
// Switch to central differencing.
integrator.set_jacobian_computation_scheme(
IntegratorType::JacobianComputationScheme::kCentralDifference);
// Reset the time, position, and velocity.
spring_mass_damper_context_->SetTime(0.0);
spring_mass_damper_->set_position(spring_mass_damper_context_.get(),
initial_position);
spring_mass_damper_->set_velocity(spring_mass_damper_context_.get(),
initial_velocity);
// Integrate for t_final seconds again.
integrator.IntegrateWithMultipleStepsToTime(t_final);
// Note: xc_final still references the context's live state vector, so these
// reads reflect the newly-integrated values.
x_final = xc_final.GetAtIndex(0);
v_final = xc_final.GetAtIndex(1);
// Verify that integrator statistics and outputs are valid, and reset the
// statistics.
EXPECT_NEAR(x_final_true, x_final, xtol);
EXPECT_NEAR(v_final_true, v_final, vtol);
CheckGeneralStatsValidity(&integrator);
// Switch to automatic differencing.
integrator.set_jacobian_computation_scheme(
IntegratorType::JacobianComputationScheme::kAutomatic);
// Reset the time, position, and velocity.
spring_mass_damper_context_->SetTime(0.0);
spring_mass_damper_->set_position(spring_mass_damper_context_.get(),
initial_position);
spring_mass_damper_->set_velocity(spring_mass_damper_context_.get(),
initial_velocity);
// Integrate for t_final seconds again.
integrator.IntegrateWithMultipleStepsToTime(t_final);
x_final = xc_final.GetAtIndex(0);
v_final = xc_final.GetAtIndex(1);
// Verify that error control was used by making sure that the minimum step
// size was smaller than large_h_.
EXPECT_LT(integrator.get_smallest_adapted_step_size_taken(), large_h_);
// Verify that integrator statistics and outputs are valid.
EXPECT_NEAR(x_final_true, x_final, xtol);
EXPECT_NEAR(v_final_true, v_final, vtol);
CheckGeneralStatsValidity(&integrator);
}
// Integrate the modified mass-spring-damping system, which exhibits a
// discontinuity in the velocity derivative at spring position x = 0.
// @param type whether Jacobian/iteration-matrix reuse is enabled. The
// problem is integrated under all three Jacobian computation schemes
// (forward difference, central difference, automatic differentiation).
void DiscontinuousSpringMassDamperTest(ReuseType type) {
// Create the integrator.
IntegratorType integrator(*mod_spring_mass_damper_,
mod_spring_mass_damper_context_.get());
integrator.set_maximum_step_size(h_);
integrator.set_throw_on_minimum_step_size_violation(false);
if (integrator.supports_error_estimation()) {
integrator.set_target_accuracy(1e-5);
}
integrator.set_reuse(reuse_type_to_bool(type));
// Setting the minimum step size speeds the unit test without (in this case)
// affecting solution accuracy.
integrator.set_requested_minimum_step_size(1e-8);
// Set the initial position and initial velocity.
const double initial_position = 1e-8;
const double initial_velocity = 0;
// Set initial condition.
mod_spring_mass_damper_->set_position(mod_spring_mass_damper_context_.get(),
initial_position);
mod_spring_mass_damper_->set_velocity(mod_spring_mass_damper_context_.get(),
initial_velocity);
// Take all the defaults.
integrator.Initialize();
// Establish tolerances for time and solution. These tolerances are
// arbitrary but seem to work well.
const double ttol = 1e2 * std::numeric_limits<double>::epsilon();
const double sol_tol = 1e-12;
// Integrate for 1 second.
const double t_final = 1.0;
integrator.IntegrateWithMultipleStepsToTime(t_final);
// Check the time.
EXPECT_NEAR(mod_spring_mass_damper_context_->get_time(), t_final, ttol);
// Get the final position and velocity.
double x_final = mod_spring_mass_damper_context_->get_continuous_state()
.get_vector()
.GetAtIndex(0);
double xdot_final = mod_spring_mass_damper_context_->get_continuous_state()
.get_vector()
.GetAtIndex(1);
// TODO(edrumwri) accurate x_final should be the equilibrium solution, where
// the velocity is zero and the spring and external forces are equal (and
// opposite).
const double equilibrium_position =
-constant_force_magnitude() / semistiff_spring_stiffness();
const double equilibrium_velocity = 0.0;
// Verify that solution and integrator statistics are valid and reset
// the statistics.
EXPECT_NEAR(equilibrium_position, x_final, sol_tol);
EXPECT_NEAR(equilibrium_velocity, xdot_final, sol_tol);
CheckGeneralStatsValidity(&integrator);
// Switch the Jacobian scheme to central differencing.
integrator.set_jacobian_computation_scheme(
IntegratorType::JacobianComputationScheme::kCentralDifference);
// Reset the time, position, and velocity.
mod_spring_mass_damper_context_->SetTime(0.0);
mod_spring_mass_damper_->set_position(
mod_spring_mass_damper_context_.get(), initial_position);
mod_spring_mass_damper_->set_velocity(
mod_spring_mass_damper_context_.get(), initial_velocity);
// Integrate again.
integrator.IntegrateWithMultipleStepsToTime(t_final);
// Check the solution and the time again, and reset the statistics again.
x_final = mod_spring_mass_damper_context_->get_continuous_state()
.get_vector()
.GetAtIndex(0);
xdot_final = mod_spring_mass_damper_context_->get_continuous_state()
.get_vector()
.GetAtIndex(1);
EXPECT_NEAR(mod_spring_mass_damper_context_->get_time(), t_final, ttol);
EXPECT_NEAR(equilibrium_position, x_final, sol_tol);
EXPECT_NEAR(equilibrium_velocity, xdot_final, sol_tol);
CheckGeneralStatsValidity(&integrator);
// Switch the Jacobian scheme to automatic differentiation.
integrator.set_jacobian_computation_scheme(
IntegratorType::JacobianComputationScheme::kAutomatic);
// Reset the time, position, and velocity.
mod_spring_mass_damper_context_->SetTime(0.0);
mod_spring_mass_damper_->set_position(
mod_spring_mass_damper_context_.get(), initial_position);
mod_spring_mass_damper_->set_velocity(
mod_spring_mass_damper_context_.get(), initial_velocity);
// Integrate again.
integrator.IntegrateWithMultipleStepsToTime(t_final);
// Check the solution and the time again.
x_final = mod_spring_mass_damper_context_->get_continuous_state()
.get_vector()
.GetAtIndex(0);
xdot_final = mod_spring_mass_damper_context_->get_continuous_state()
.get_vector()
.GetAtIndex(1);
EXPECT_NEAR(mod_spring_mass_damper_context_->get_time(), t_final, ttol);
EXPECT_NEAR(equilibrium_position, x_final, sol_tol);
EXPECT_NEAR(equilibrium_velocity, xdot_final, sol_tol);
CheckGeneralStatsValidity(&integrator);
}
// Integrate an undamped system and check its solution accuracy.
// @param type whether Jacobian/iteration-matrix reuse is enabled. The same
// problem is solved under all three Jacobian computation schemes and the
// result compared against the closed-form solution each time.
void SpringMassStepTest(ReuseType type) {
const double spring_k = 300.0; // N/m
// Create a new spring-mass system.
SpringMassSystem<double> spring_mass(spring_k, mass_, false);
std::unique_ptr<Context<double>> context =
spring_mass.CreateDefaultContext();
// Set integrator parameters; we want error control to initially "fail",
// necessitating step size adjustment.
IntegratorType integrator(spring_mass, context.get());
// For fixed step integrators, we need to use a smaller step size to get
// the desired accuracy. By experimentation, we found that 0.5 h_ works.
double h = integrator.supports_error_estimation() ? large_h_ : 0.5 * h_;
integrator.set_maximum_step_size(h);
if (integrator.supports_error_estimation()) {
integrator.request_initial_step_size_target(h);
integrator.set_target_accuracy(5e-5);
}
integrator.set_requested_minimum_step_size(1e-6);
integrator.set_reuse(reuse_type_to_bool(type));
// Setup the initial position and initial velocity.
const double initial_position = 0.1;
const double initial_velocity = 0.01;
// Set initial condition.
spring_mass.set_position(context.get(), initial_position);
spring_mass.set_velocity(context.get(), initial_velocity);
// Take all the defaults.
integrator.Initialize();
// Integrate for 1 second.
const double ttol = 1e2 * std::numeric_limits<double>::epsilon();
const double t_final = 1.0;
integrator.IntegrateWithMultipleStepsToTime(t_final);
// Check the time.
EXPECT_NEAR(context->get_time(), t_final, ttol);
// Get the final position.
double x_final = context->get_continuous_state().get_vector().GetAtIndex(0);
// Compute the true solution at t_final.
double x_final_true, v_final_true;
spring_mass.GetClosedFormSolution(initial_position, initial_velocity,
t_final, &x_final_true, &v_final_true);
// Check the solution to the same tolerance as the explicit Euler
// integrator (see explicit_euler_integrator_test.cc, SpringMassStep).
EXPECT_NEAR(x_final_true, x_final, 5e-3);
// Verify that integrator statistics are valid and reset the statistics.
CheckGeneralStatsValidity(&integrator);
// Switch to central differencing.
integrator.set_jacobian_computation_scheme(
IntegratorType::JacobianComputationScheme::kCentralDifference);
// Reset the time, position, and velocity.
context->SetTime(0.0);
spring_mass.set_position(context.get(), initial_position);
spring_mass.set_velocity(context.get(), initial_velocity);
// Integrate for t_final seconds again.
integrator.IntegrateWithMultipleStepsToTime(t_final);
// Check results again.
x_final = context->get_continuous_state().get_vector().GetAtIndex(0);
EXPECT_NEAR(x_final_true, x_final, 5e-3);
EXPECT_NEAR(context->get_time(), t_final, ttol);
// Verify that integrator statistics are valid and reset the statistics.
CheckGeneralStatsValidity(&integrator);
// Switch to automatic differentiation.
integrator.set_jacobian_computation_scheme(
IntegratorType::JacobianComputationScheme::kAutomatic);
// Reset the time, position, and velocity.
context->SetTime(0.0);
spring_mass.set_position(context.get(), initial_position);
spring_mass.set_velocity(context.get(), initial_velocity);
// Integrate for t_final seconds again.
integrator.IntegrateWithMultipleStepsToTime(t_final);
// Check results again.
x_final = context->get_continuous_state().get_vector().GetAtIndex(0);
EXPECT_NEAR(x_final_true, x_final, 5e-3);
EXPECT_NEAR(context->get_time(), t_final, ttol);
// Verify that integrator statistics are valid.
CheckGeneralStatsValidity(&integrator);
}
// Checks the error estimator for the implicit Euler integrator using the
// spring-mass system:
// d^2x/dt^2 = -kx/m
// solution to this ODE: x(t) = c1*cos(omega*t) + c2*sin(omega*t)
// where omega = sqrt(k/m)
// ẋ(t) = -c1*sin(omega*t)*omega + c2*cos(omega*t)*omega
// for t = 0, x(0) = c1, ẋ(0) = c2*omega
// @param type whether Jacobian/iteration-matrix reuse is enabled. Skipped
// (via GTEST_SKIP) for integrators without error estimation.
void ErrorEstimationTest(ReuseType type) {
const double spring_k = 300.0; // N/m
// Create a new spring-mass system.
SpringMassSystem<double> spring_mass(spring_k, mass_, false);
std::unique_ptr<Context<double>> context =
spring_mass.CreateDefaultContext();
// Set the integrator to operate in fixed step mode.
IntegratorType integrator(spring_mass, context.get());
// Skip this test if the integrator doesn't have error estimate.
if (!integrator.supports_error_estimation()) GTEST_SKIP();
integrator.set_maximum_step_size(large_h_);
integrator.set_fixed_step_mode(true);
integrator.set_reuse(reuse_type_to_bool(type));
// Use automatic differentiation because we can.
integrator.set_jacobian_computation_scheme(
IntegratorType::JacobianComputationScheme::kAutomatic);
// Create the initial positions and velocities.
const int n_initial_conditions = 3;
const double initial_position[n_initial_conditions] = {0.1, 1.0, 0.0};
const double initial_velocity[n_initial_conditions] = {0.01, 1.0, -10.0};
// Create the integration step size array. NOTE: h values greater than 1e-2
// (or so) results in very poor error estimates. h values smaller than 1e-8
// (or so) results in NaN relative errors (indicating that solution matches
// ideal one to very high accuracy).
const int n_h = 4;
const double h[n_h] = {1e-8, 1e-4, 1e-3, 1e-2};
// Take all the defaults.
integrator.Initialize();
// Set the allowed error on the time.
const double ttol = 10 * std::numeric_limits<double>::epsilon();
// Set the error estimate tolerance on absolute error. We get this by
// starting from 1e-2 for a step size of 1e-2 and then multiply be 1e-2 for
// each order of magnitude decrease in step size. This yields a quadratic
// reduction in error, as expected.
const double atol[n_h] = {1e-14, 1e-6, 1e-4, 0.01};
// Iterate the specified number of initial conditions.
// Iterate over the number of integration step sizes.
for (int j = 0; j < n_h; ++j) {
for (int i = 0; i < n_initial_conditions; ++i) {
// Reset the time.
context->SetTime(0.0);
// Set initial condition.
spring_mass.set_position(context.get(), initial_position[i]);
spring_mass.set_velocity(context.get(), initial_velocity[i]);
// Integrate for the desired step size.
ASSERT_TRUE(integrator.IntegrateWithSingleFixedStepToTime(
context->get_time() + h[j]));
// Check the time.
EXPECT_NEAR(context->get_time(), h[j], ttol);
// Get the error estimate.
const double est_err =
std::abs(integrator.get_error_estimate()->CopyToVector()[0]);
// Get the final position of the spring.
const double x_final =
context->get_continuous_state().get_vector().GetAtIndex(0);
// Get the true position.
double x_final_true, v_final_true;
spring_mass.GetClosedFormSolution(initial_position[i],
initial_velocity[i], h[j],
&x_final_true, &v_final_true);
// Check the relative error on position.
const double err = std::abs(x_final - x_final_true);
// The error estimate itself should be accurate to the same tolerance as
// the solution error.
const double err_est_err = std::abs(err - est_err);
EXPECT_LE(err, atol[j]);
EXPECT_LE(err_est_err, atol[j]);
}
}
}
// Integrate over a significant period of time to verify that global error
// estimation acts as we expect.
// @param type whether Jacobian/iteration-matrix reuse is enabled. Skipped
// for integrators without error estimation. Verifies that loosening the
// target accuracy increases the positional error.
void SpringMassStepAccuracyEffectsTest(ReuseType type) {
const double spring_k = 300.0; // N/m
// Create a new spring-mass system.
SpringMassSystem<double> spring_mass(spring_k, mass_, false);
std::unique_ptr<Context<double>> context =
spring_mass.CreateDefaultContext();
// Spring-mass system is necessary only to setup the problem.
IntegratorType integrator(spring_mass, context.get());
if (!integrator.supports_error_estimation()) GTEST_SKIP();
integrator.set_maximum_step_size(large_h_);
integrator.set_requested_minimum_step_size(small_h_);
integrator.set_throw_on_minimum_step_size_violation(false);
integrator.set_target_accuracy(1e-4);
integrator.set_reuse(reuse_type_to_bool(type));
// Setup the initial position and initial velocity.
const double initial_position = 0.1;
const double initial_velocity = 0.01;
// Set initial condition.
spring_mass.set_position(context.get(), initial_position);
spring_mass.set_velocity(context.get(), initial_velocity);
// Take all the defaults.
integrator.Initialize();
EXPECT_NEAR(integrator.get_accuracy_in_use(), 1e-4,
std::numeric_limits<double>::epsilon());
// Get the actual solution.
double x_final_true, v_final_true;
spring_mass.GetClosedFormSolution(initial_position, initial_velocity,
large_h_, &x_final_true, &v_final_true);
// Integrate exactly one step.
integrator.IntegrateWithMultipleStepsToTime(context->get_time() + large_h_);
// Get the positional error.
const double pos_err = std::abs(
x_final_true - context->get_continuous_state_vector().GetAtIndex(0));
// Make the accuracy setting looser, integrate again, and verify that
// positional error increases.
integrator.set_target_accuracy(100.0);
EXPECT_NEAR(integrator.get_accuracy_in_use(), 100.0,
std::numeric_limits<double>::epsilon());
integrator.Initialize();
context->SetTime(0);
spring_mass.set_position(context.get(), initial_position);
spring_mass.set_velocity(context.get(), initial_velocity);
integrator.IntegrateWithMultipleStepsToTime(context->get_time() + large_h_);
EXPECT_GT(std::abs(x_final_true -
context->get_continuous_state_vector().GetAtIndex(0)),
pos_err);
}
// Returns the default integration step size.
double h() const { return h_; }
// Returns the magnitude of the constant external force.
double constant_force_magnitude() const { return constant_force_mag_; }
// Returns the semi-stiff spring constant.
double semistiff_spring_stiffness() const { return semistiff_spring_k_; }
// Returns a const reference to the undamped spring-mass test system.
const SpringMassSystem<double>& spring_mass() const { return *spring_mass_; }
// Checks the validity of general integrator statistics and resets statistics.
// Error-estimator statistics are only checked when the integrator supports
// error estimation. Counters that must be strictly positive use EXPECT_GT;
// counters that may legitimately be zero use EXPECT_GE.
static void CheckGeneralStatsValidity(IntegratorType* integrator) {
// At least one Newton-Raphson iteration must have occurred.
EXPECT_GT(integrator->get_num_newton_raphson_iterations(), 0);
if (integrator->supports_error_estimation()) {
EXPECT_GT(integrator->get_num_error_estimator_newton_raphson_iterations(),
0);
}
EXPECT_GT(integrator->get_previous_integration_step_size(), 0.0);
EXPECT_GT(integrator->get_largest_step_size_taken(), 0.0);
EXPECT_GE(integrator->get_num_steps_taken(), 0);
EXPECT_GT(integrator->get_num_derivative_evaluations(), 0);
if (integrator->supports_error_estimation()) {
EXPECT_GE(integrator->get_num_error_estimator_derivative_evaluations(),
0);
}
EXPECT_GT(integrator->get_num_derivative_evaluations_for_jacobian(), 0);
if (integrator->supports_error_estimation()) {
EXPECT_GE(
integrator
->get_num_error_estimator_derivative_evaluations_for_jacobian(),
0);
}
EXPECT_GE(integrator->get_num_jacobian_evaluations(), 0);
if (integrator->supports_error_estimation()) {
EXPECT_GE(integrator->get_num_error_estimator_jacobian_evaluations(), 0);
}
EXPECT_GE(integrator->get_num_iteration_matrix_factorizations(), 0);
if (integrator->supports_error_estimation()) {
EXPECT_GE(
integrator->get_num_error_estimator_iteration_matrix_factorizations(),
0);
}
EXPECT_GE(integrator->get_num_substep_failures(), 0);
EXPECT_GE(integrator->get_num_step_shrinkages_from_substep_failures(), 0);
EXPECT_GE(integrator->get_num_step_shrinkages_from_error_control(), 0);
// Leave a clean slate for subsequent statistics checks.
integrator->ResetStatistics();
}
protected:
// Contexts for the test systems below.
std::unique_ptr<Context<double>> spring_mass_context_;
std::unique_ptr<Context<double>> spring_mass_damper_context_;
std::unique_ptr<Context<double>> mod_spring_mass_damper_context_;
std::unique_ptr<Context<double>> dspring_context_;
// The test systems themselves (undamped, damped, damped-with-discontinuous-
// derivative, and stiff two-mass variants).
std::unique_ptr<SpringMassSystem<double>> spring_mass_;
std::unique_ptr<implicit_integrator_test::SpringMassDamperSystem<double>>
spring_mass_damper_;
std::unique_ptr<
implicit_integrator_test::DiscontinuousSpringMassDamperSystem<double>>
mod_spring_mass_damper_;
std::unique_ptr<analysis::test::StiffDoubleMassSpringSystem<double>>
stiff_double_system_;
private:
// Converts a ReuseType to the boolean expected by
// IntegratorBase::set_reuse(): any value other than kNoReuse enables
// Jacobian/iteration-matrix reuse.
bool reuse_type_to_bool(ReuseType type) {
// Direct boolean expression replaces the original if/else that returned
// boolean literals; behavior is identical.
return type != kNoReuse;
}
// Step sizes and physical parameters shared by the tests above.
const double h_ = 1e-3; // Default integration step size.
const double large_h_ = 1e-1; // Large integration step size.
const double small_h_ = 1e-6; // Smallest integration step size.
const double mass_ = 2.0; // Default particle mass.
const double constant_force_mag_ = 10; // Magnitude of the constant force.
// Default spring constant. Corresponds to a frequency of 0.1125 cycles per
// second without damping, assuming that mass = 2 (using formula
// f = sqrt(k/mass)/(2*pi), where k is the spring constant, and f is the
// frequency in cycles per second).
const double spring_k_ = 1.0;
// Default spring constant for a semi-stiff spring. Corresponds to a
// frequency of 35.588 cycles per second without damping, assuming that mass
// = 2 (using formula f = sqrt(k/mass)/(2*pi), where k is the spring
// constant, and f is the frequency in cycles per second).
const double semistiff_spring_k_ = 1e5;
// Default spring constant for a stiff spring. Corresponds to a frequency
// of 11,254 cycles per second without damping, assuming that mass = 2
// (using formula f = sqrt(k/mass)/(2*pi), where k is the spring constant,
// and f is the frequency in cycles per second).
const double stiff_spring_k_ = 1e10;
// Default semi-stiff (in the computational sense) damping coefficient.
// For the "modified" spring and damper, and assuming that mass = 2 and
// stiff_spring_k = 1e10, this will result in a damping ratio of
// damping_b / (2*sqrt(mass*stiff_spring_k)) = 0.035, meaning that
// the system is underdamped.
const double damping_b_ = 1e4;
// Default stiff (in the computational sense) damping coefficient. For
// the "vanilla" spring and damper, and assuming that mass = 2 and
// stiff_spring_k = 1e10, this will result in a damping ratio of
// stiff_damping_b / (2*sqrt(mass*stiff_spring_k)) = 353, meaning
// that the system is overdamped.
const double stiff_damping_b_ = 1e8;
};
// Declare the type-parameterized test suite; the concrete integrator types
// are bound elsewhere via REGISTER/INSTANTIATE_TYPED_TEST_SUITE_P.
TYPED_TEST_SUITE_P(ImplicitIntegratorTest);
// Exercises the miscellaneous-API test without Jacobian/matrix reuse.
TYPED_TEST_P(ImplicitIntegratorTest, MiscAPINoReuse) {
this->MiscAPITest(kNoReuse);
}
// Exercises the miscellaneous-API test with Jacobian/matrix reuse enabled.
TYPED_TEST_P(ImplicitIntegratorTest, MiscAPIReuse) {
this->MiscAPITest(kReuse);
}
// Tests the Jacobian and iteration matrix reuse strategies using a test
// problem and integrator for which we have knowledge of the convergence
// behavior from the initial state.
TYPED_TEST_P(ImplicitIntegratorTest, Reuse) {
std::unique_ptr<analysis::test::RobertsonSystem<double>> robertson =
std::make_unique<analysis::test::RobertsonSystem<double>>();
std::unique_ptr<Context<double>> context = robertson->CreateDefaultContext();
// Create the integrator.
using Integrator = TypeParam;
Integrator integrator(*robertson, context.get());
integrator.set_maximum_step_size(1e-2); // Maximum step to be attempted.
integrator.set_throw_on_minimum_step_size_violation(false);
integrator.set_fixed_step_mode(true);
integrator.set_reuse(true); // The whole point of this.
// Attempt to integrate the system. Our past experience indicates that this
// system fails to converge from the initial state for this large step size.
// This tests the case where the Jacobian matrix has yet to be formed. There
// should be one Jacobian matrix evaluation - once at trial 1. There should
// also be two iteration matrix factorizations: once at trial 1, and another
// at trial 2. Trial 3 should be skipped because the first Jacobian matrix
// computation makes the Jacobian "fresh". The exception is the
// VelocityImplicitEulerIntegrator, which will recompute both on trial 3
// because it does not reuse Jacobians for different step sizes; hence
// it will have 3 factorizations and 2 Jacobian evaluations.
// TODO(antequ): see TODO in ImplicitIntegrator::MaybeFreshenMatrices()
// for potential improvements that will require changes here.
integrator.Initialize();
ASSERT_FALSE(integrator.IntegrateWithSingleFixedStepToTime(1e-2));
if (!std::is_same_v<Integrator, VelocityImplicitEulerIntegrator<double>>) {
EXPECT_EQ(integrator.get_num_iteration_matrix_factorizations(), 2);
EXPECT_EQ(integrator.get_num_jacobian_evaluations(), 1);
} else {
EXPECT_EQ(integrator.get_num_iteration_matrix_factorizations(), 3);
EXPECT_EQ(integrator.get_num_jacobian_evaluations(), 2);
}
// Now integrate again but with a smaller size. Again, past experience
// that this step size should be sufficiently small for the integrator to
// converge. The Jacobian matrix will be "fresh"; we assume no knowledge
// of the number of iteration matrix factorizations.
integrator.ResetStatistics();
ASSERT_TRUE(integrator.IntegrateWithSingleFixedStepToTime(1e-6));
// Reuse means no new Jacobian evaluations are needed for a converging step.
EXPECT_EQ(integrator.get_num_jacobian_evaluations(), 0);
// Again try taking a large step, which we expect will be too large to
// converge. There should be one Jacobian matrix evaluation- once at trial 3.
// There should be two iteration matrix factorizations: one at trial 2 and
// another at trial 3.
integrator.ResetStatistics();
ASSERT_FALSE(integrator.IntegrateWithSingleFixedStepToTime(1e-2));
EXPECT_EQ(integrator.get_num_iteration_matrix_factorizations(), 2);
EXPECT_EQ(integrator.get_num_jacobian_evaluations(), 1);
}
// Tests that the full-Newton approach computes a Jacobian matrix and factorizes
// the iteration matrix on every Newton-Raphson iteration.
TYPED_TEST_P(ImplicitIntegratorTest, FullNewton) {
std::unique_ptr<analysis::test::RobertsonSystem<double>> robertson =
std::make_unique<analysis::test::RobertsonSystem<double>>();
std::unique_ptr<Context<double>> context = robertson->CreateDefaultContext();
// Create the integrator.
using Integrator = TypeParam;
Integrator integrator(*robertson, context.get());
if (integrator.supports_error_estimation()) {
integrator.request_initial_step_size_target(1e0);
} else {
integrator.set_maximum_step_size(1e0);
}
integrator.set_throw_on_minimum_step_size_violation(false);
integrator.set_fixed_step_mode(true);
integrator.set_use_full_newton(true); // The whole point of this test.
// Attempt to integrate the system. Our past experience indicates that this
// system fails to converge from the initial state for this large step size.
// This tests the case where the Jacobian matrix has yet to be formed.
integrator.Initialize();
ASSERT_FALSE(integrator.IntegrateWithSingleFixedStepToTime(1e0));
// In full-Newton mode, factorizations and Jacobian evaluations must track
// the Newton-Raphson iteration count one-for-one.
EXPECT_EQ(integrator.get_num_iteration_matrix_factorizations(),
integrator.get_num_newton_raphson_iterations());
EXPECT_EQ(integrator.get_num_jacobian_evaluations(),
integrator.get_num_newton_raphson_iterations());
// Now integrate again but with a smaller size. Again, past experience tells
// us that this step size should be sufficiently small for the integrator to
// converge.
integrator.ResetStatistics();
ASSERT_TRUE(integrator.IntegrateWithSingleFixedStepToTime(1e-6));
EXPECT_EQ(integrator.get_num_iteration_matrix_factorizations(),
integrator.get_num_newton_raphson_iterations());
EXPECT_EQ(integrator.get_num_jacobian_evaluations(),
integrator.get_num_newton_raphson_iterations());
// Again try taking a large step, which we expect will be too large to
// converge.
integrator.ResetStatistics();
ASSERT_FALSE(integrator.IntegrateWithSingleFixedStepToTime(1e0));
EXPECT_EQ(integrator.get_num_iteration_matrix_factorizations(),
integrator.get_num_newton_raphson_iterations());
EXPECT_EQ(integrator.get_num_jacobian_evaluations(),
integrator.get_num_newton_raphson_iterations());
}
// Tests the implicit integrator on a stationary system problem, which
// stresses numerical differentiation (since the state does not change).
// This test also verifies that integration with AutoDiff'd Jacobians
// succeeds when the derivative does not depend on the state.
TYPED_TEST_P(ImplicitIntegratorTest, Stationary) {
auto stationary = std::make_unique<StationarySystem<double>>();
std::unique_ptr<Context<double>> context = stationary->CreateDefaultContext();
// Set the initial condition for the stationary system.
VectorBase<double>& state =
context->get_mutable_continuous_state().get_mutable_vector();
state.SetAtIndex(0, 0.0);
state.SetAtIndex(1, 0.0);
// Create the integrator.
using Integrator = TypeParam;
Integrator integrator(*stationary, context.get());
integrator.set_maximum_step_size(1.0);
if (integrator.supports_error_estimation()) {
integrator.set_target_accuracy(1e-3);
integrator.request_initial_step_size_target(1e-3);
}
// Integrate the system
integrator.Initialize();
integrator.IntegrateWithMultipleStepsToTime(1.0);
// Verify the solution: a stationary system must remain at the origin.
EXPECT_NEAR(state.GetAtIndex(0), 0, std::numeric_limits<double>::epsilon());
EXPECT_NEAR(state.GetAtIndex(1), 0, std::numeric_limits<double>::epsilon());
ImplicitIntegratorTest<Integrator>::CheckGeneralStatsValidity(&integrator);
// Set up the same problem with an AutoDiff'd Jacobian computation
// scheme.
integrator.set_jacobian_computation_scheme(
Integrator::JacobianComputationScheme::kAutomatic);
// Reset the time, position, and velocity.
context->SetTime(0.0);
VectorBase<double>& new_state =
context->get_mutable_continuous_state().get_mutable_vector();
new_state.SetAtIndex(0, 0.0);
new_state.SetAtIndex(1, 0.0);
integrator.set_maximum_step_size(1.0);
if (integrator.supports_error_estimation()) {
integrator.set_target_accuracy(1e-3);
integrator.request_initial_step_size_target(1e-3);
}
// Integrate the system
// NOTE(review): Initialize() is not re-called after switching the Jacobian
// scheme; presumably the integrator permits this — confirm.
integrator.IntegrateWithMultipleStepsToTime(1.0);
// Verify the solution.
EXPECT_NEAR(new_state.GetAtIndex(0), 0,
std::numeric_limits<double>::epsilon());
EXPECT_NEAR(new_state.GetAtIndex(1), 0,
std::numeric_limits<double>::epsilon());
ImplicitIntegratorTest<Integrator>::CheckGeneralStatsValidity(&integrator);
}
// Tests the implicit integrator on Robertson's stiff chemical reaction
// problem, which has been used to benchmark various implicit integrators.
// This problem is particularly good at testing large step sizes (since the
// solution quickly converges) and long simulation times.
TYPED_TEST_P(ImplicitIntegratorTest, Robertson) {
std::unique_ptr<analysis::test::RobertsonSystem<double>> robertson =
std::make_unique<analysis::test::RobertsonSystem<double>>();
std::unique_ptr<Context<double>> context = robertson->CreateDefaultContext();
const double t_final = robertson->get_end_time();
const double tol = 5e-5;
// Create the integrator.
using Integrator = TypeParam;
Integrator integrator(*robertson, context.get());
// Very large step is necessary for this problem since given solution is
// at t = 1e11. However, the current initial step size selection algorithm
// will use a large factor of the maximum step size, which can result in
// too large an initial step for this problem. Accordingly, we explicitly
// select a small initial step size.
// @TODO(edrumwri): Explore a better algorithm for selecting the initial
// step size (see issue #6329).
integrator.set_maximum_step_size(10000000.0);
integrator.set_throw_on_minimum_step_size_violation(false);
if (integrator.supports_error_estimation()) {
integrator.set_target_accuracy(tol);
integrator.request_initial_step_size_target(1e-4);
}
// Integrate the system
integrator.Initialize();
integrator.IntegrateWithMultipleStepsToTime(t_final);
// Verify the solution against the reference solution from the system.
const VectorBase<double>& state =
context->get_continuous_state().get_vector();
const Eigen::Vector3d sol = robertson->GetSolution(t_final);
EXPECT_NEAR(state.GetAtIndex(0), sol(0), tol);
EXPECT_NEAR(state.GetAtIndex(1), sol(1), tol);
EXPECT_NEAR(state.GetAtIndex(2), sol(2), tol);
}
// NOTE(review): despite the name, this test verifies that a single fixed step
// that fails to converge returns false — no throwing is checked. Confirm the
// name reflects intent.
TYPED_TEST_P(ImplicitIntegratorTest, FixedStepThrowsOnMultiStep) {
auto robertson = std::make_unique<analysis::test::RobertsonSystem<double>>();
std::unique_ptr<Context<double>> context = robertson->CreateDefaultContext();
// Relatively large step size that we know fails to converge from the initial
// state.
const double h = 1e-2;
// Create the integrator.
using Integrator = TypeParam;
Integrator integrator(*robertson, context.get());
// Make sure integrator can take the size we want.
integrator.set_maximum_step_size(h);
// Enable fixed stepping.
integrator.set_fixed_step_mode(true);
// Values we have used successfully in other Robertson system tests.
if (integrator.supports_error_estimation()) {
integrator.set_target_accuracy(5e-5);
}
// Integrate to the desired step time. We expect this to return false because
// the integrator is generally unlikely to converge for such a relatively
// large step.
integrator.Initialize();
EXPECT_FALSE(
integrator.IntegrateWithSingleFixedStepToTime(context->get_time() + h));
}
// Verifies context getters/setters and that operations on an integrator with
// a null context throw std::logic_error.
TYPED_TEST_P(ImplicitIntegratorTest, ContextAccess) {
// Create the integrator.
using Integrator = TypeParam;
Integrator integrator(this->spring_mass(), this->spring_mass_context_.get());
integrator.get_mutable_context()->SetTime(3.);
EXPECT_EQ(integrator.get_context().get_time(), 3.);
const double t_final = integrator.get_context().get_time() + this->h();
// Null out the context; initialization and integration must now fail.
integrator.reset_context(nullptr);
EXPECT_THROW(integrator.Initialize(), std::logic_error);
EXPECT_THROW(integrator.IntegrateNoFurtherThanTime(t_final, t_final, t_final),
std::logic_error);
}
/// Verifies error estimation is supported and that accuracy/step-size targets
/// can be set without throwing. Skipped for integrators without error
/// estimation.
TYPED_TEST_P(ImplicitIntegratorTest, AccuracyEstAndErrorControl) {
// Spring-mass system is necessary only to setup the problem.
using Integrator = TypeParam;
Integrator integrator(this->spring_mass(), this->spring_mass_context_.get());
if (!integrator.supports_error_estimation()) GTEST_SKIP();
EXPECT_EQ(integrator.supports_error_estimation(), true);
DRAKE_EXPECT_NO_THROW(integrator.set_target_accuracy(1e-1));
DRAKE_EXPECT_NO_THROW(integrator.request_initial_step_size_target(this->h()));
}
// Tests accuracy for integrating linear systems (with the state at time t
// corresponding to f(t) ≡ St + C, where S is a scalar and C is the initial
// state) over t ∈ [0, 1]. The asymptotic term in every implicit integrator's
// error estimate is at least second order, meaning that it uses the Taylor
// Series expansion: f(t+h) ≈ f(t) + hf'(t) + O(h²). This formula indicates that
// the approximation error will be zero if f''(t) = 0, which is true for linear
// systems. We check that the error estimator gives a perfect error estimate for
// this function.
TYPED_TEST_P(ImplicitIntegratorTest, LinearTest) {
  LinearScalarSystem linear;
  auto linear_context = linear.CreateDefaultContext();
  // Initialize the continuous state to the linear system's value at t = 0.
  const double C = linear.Evaluate(0);
  linear_context->SetTime(0.0);
  linear_context->get_mutable_continuous_state_vector()[0] = C;
  using Integrator = TypeParam;
  Integrator integrator1(linear, linear_context.get());
  // Take a single fixed step covering the entire interval [0, 1].
  const double t_final = 1.0;
  integrator1.set_maximum_step_size(t_final);
  integrator1.set_fixed_step_mode(true);
  integrator1.Initialize();
  ASSERT_TRUE(integrator1.IntegrateWithSingleFixedStepToTime(t_final));
  if (integrator1.supports_error_estimation()) {
    const double err_est = integrator1.get_error_estimate()->get_vector()[0];
    // Note the very tight tolerance used, which will likely not hold for
    // arbitrary values of C, t_final, or polynomial coefficients.
    EXPECT_NEAR(err_est, 0.0, 2 * std::numeric_limits<double>::epsilon());
    // Verify the solution.
    VectorX<double> state =
        linear_context->get_continuous_state().get_vector().CopyToVector();
    EXPECT_NEAR(state[0], linear.Evaluate(t_final),
                std::numeric_limits<double>::epsilon());
    // Repeat this test, but using a final time that is below the working
    // minimum step size (thereby triggering the implicit integrator's
    // alternate, explicit mode). To retain our existing tolerances, we change
    // the scale factor (S) for the linear system.
    integrator1.get_mutable_context()->SetTime(0);
    const double working_min = integrator1.get_working_minimum_step_size();
    LinearScalarSystem scaled_linear(4.0 / working_min);
    auto scaled_linear_context = scaled_linear.CreateDefaultContext();
    Integrator integrator2(scaled_linear, scaled_linear_context.get());
    const double updated_t_final = working_min / 2;
    integrator2.set_maximum_step_size(updated_t_final);
    integrator2.set_fixed_step_mode(true);
    integrator2.Initialize();
    ASSERT_TRUE(
        integrator2.IntegrateWithSingleFixedStepToTime(updated_t_final));
    const double updated_err_est =
        integrator2.get_error_estimate()->get_vector()[0];
    // Note the very tight tolerance used, which will likely not hold for
    // arbitrary values of C, t_final, or polynomial coefficients.
    EXPECT_NEAR(updated_err_est, 0.0,
                2 * std::numeric_limits<double>::epsilon());
    // Verify the solution too.
    EXPECT_NEAR(scaled_linear_context->get_continuous_state()
                    .get_vector()
                    .CopyToVector()[0],
                scaled_linear.Evaluate(updated_t_final),
                10 * std::numeric_limits<double>::epsilon());
  }
}
// The remaining tests are thin wrappers that run each shared test body both
// without (kNoReuse) and with (kReuse) Jacobian/iteration-matrix reuse.
TYPED_TEST_P(ImplicitIntegratorTest, DoubleSpringMassDamperNoReuse) {
  this->DoubleSpringMassDamperTest(kNoReuse);
}
TYPED_TEST_P(ImplicitIntegratorTest, DoubleSpringMassDamperReuse) {
  this->DoubleSpringMassDamperTest(kReuse);
}
TYPED_TEST_P(ImplicitIntegratorTest, SpringMassDamperStiffNoReuse) {
  this->SpringMassDamperStiffTest(kNoReuse);
}
TYPED_TEST_P(ImplicitIntegratorTest, SpringMassDamperStiffReuse) {
  this->SpringMassDamperStiffTest(kReuse);
}
TYPED_TEST_P(ImplicitIntegratorTest, DiscontinuousSpringMassDamperNoReuse) {
  this->DiscontinuousSpringMassDamperTest(kNoReuse);
}
TYPED_TEST_P(ImplicitIntegratorTest, DiscontinuousSpringMassDamperReuse) {
  this->DiscontinuousSpringMassDamperTest(kReuse);
}
TYPED_TEST_P(ImplicitIntegratorTest, SpringMassStepNoReuse) {
  this->SpringMassStepTest(kNoReuse);
}
TYPED_TEST_P(ImplicitIntegratorTest, SpringMassStepReuse) {
  this->SpringMassStepTest(kReuse);
}
TYPED_TEST_P(ImplicitIntegratorTest, ErrorEstimationNoReuse) {
  this->ErrorEstimationTest(kNoReuse);
}
TYPED_TEST_P(ImplicitIntegratorTest, ErrorEstimationReuse) {
  this->ErrorEstimationTest(kReuse);
}
TYPED_TEST_P(ImplicitIntegratorTest, SpringMassStepAccuracyEffectsNoReuse) {
  this->SpringMassStepAccuracyEffectsTest(kNoReuse);
}
TYPED_TEST_P(ImplicitIntegratorTest, SpringMassStepAccuracyEffectsReuse) {
  this->SpringMassStepAccuracyEffectsTest(kReuse);
}
// Registers every test in the suite so that it can be instantiated for a
// particular integrator type via INSTANTIATE_TYPED_TEST_SUITE_P.
REGISTER_TYPED_TEST_SUITE_P(
    ImplicitIntegratorTest, Reuse, FullNewton, MiscAPINoReuse, MiscAPIReuse,
    Stationary, Robertson, FixedStepThrowsOnMultiStep, ContextAccess,
    AccuracyEstAndErrorControl, LinearTest, DoubleSpringMassDamperNoReuse,
    DoubleSpringMassDamperReuse, SpringMassDamperStiffNoReuse,
    SpringMassDamperStiffReuse, DiscontinuousSpringMassDamperNoReuse,
    DiscontinuousSpringMassDamperReuse, SpringMassStepNoReuse,
    SpringMassStepReuse, ErrorEstimationNoReuse, ErrorEstimationReuse,
    SpringMassStepAccuracyEffectsNoReuse, SpringMassStepAccuracyEffectsReuse);
} // namespace analysis_test
} // namespace systems
} // namespace drake
| 0 |
/home/johnshepherd/drake/systems/analysis | /home/johnshepherd/drake/systems/analysis/test_utilities/pleides_system.h | #pragma once
#include <cmath>
#include <vector>
#include "drake/systems/framework/leaf_system.h"
namespace drake {
namespace systems {
namespace analysis {
namespace test {
using Vector2d = Vector2<double>;
/// A system of ODEs that can be used to test performance of the initial value
/// problem (IVP) solvers. This problem corresponds to the "n-body-problem"
/// (where n = 7); in short, given initial positions and velocities of seven
/// particles that move according to Newtonian Mechanics (F=ma) and subject to
/// inverse-square gravitational forces, compute their positions and velocities
/// at a given time in the future. One potentially useful aspect of this problem
/// is that it is energy conserving. The problem setup and data are taken from:
/// https://archimede.dm.uniba.it/~testset/report/plei.pdf, which is part of the
/// IVP benchmark suite described in:
///
/// F.Mazzia and C.Magherini. Test Set for Initial Value Problem Solvers,
/// release 2.4. Department of Mathematics, University of Bari and INdAM,
/// Research Unit of Bari, February 2008. http://www.dm.uniba.it/~testset.
class PleidesSystem : public LeafSystem<double> {
 public:
  DRAKE_NO_COPY_NO_MOVE_NO_ASSIGN(PleidesSystem)

  PleidesSystem() {
    const int nq = 14;  // Number of generalized positions.
    const int nv = 14;  // Number of generalized velocities.
    const int nz = 0;   // No additional state variables.
    this->DeclareContinuousState(nq, nv, nz);

    // Set masses (as defined in plei.pdf): particle i has mass i + 1.
    for (int i = 0; i < kNumParticles; ++i) mass_[i] = i + 1;
  }

  // Number of particles being simulated.
  constexpr static int kNumParticles = 7;

  void SetDefaultState(
      const Context<double>& context, State<double>* state) const override {
    VectorX<double> q(kNumParticles * 2), v(kNumParticles * 2);

    // Set the initial positions. Note that these are all representable in
    // IEEE 754 without any representation error. Each pair of initial
    // positions corresponds to the (x, y) location of a particle. From
    // plei.pdf, the first particle's initial position is x0 = q[0] = 3,
    // y0 = q[7] = 3.
    q.head(kNumParticles)[0] = 3.0;   q.tail(kNumParticles)[0] = 3.0;
    q.head(kNumParticles)[1] = 3.0;   q.tail(kNumParticles)[1] = -3.0;
    q.head(kNumParticles)[2] = -1.0;  q.tail(kNumParticles)[2] = 2.0;
    q.head(kNumParticles)[3] = -3.0;  q.tail(kNumParticles)[3] = 0.0;
    q.head(kNumParticles)[4] = 2.0;   q.tail(kNumParticles)[4] = 0.0;
    q.head(kNumParticles)[5] = -2.0;  q.tail(kNumParticles)[5] = -4.0;
    q.head(kNumParticles)[6] = 2.0;   q.tail(kNumParticles)[6] = 4.0;

    // Set the initial velocities. Uses the same representation layout as with
    // the positions.
    v.head(kNumParticles)[0] = 0.0;   v.tail(kNumParticles)[0] = 0.0;
    v.head(kNumParticles)[1] = 0.0;   v.tail(kNumParticles)[1] = 0.0;
    v.head(kNumParticles)[2] = 0.0;   v.tail(kNumParticles)[2] = 0.0;
    v.head(kNumParticles)[3] = 0.0;   v.tail(kNumParticles)[3] = -1.25;
    v.head(kNumParticles)[4] = 0.0;   v.tail(kNumParticles)[4] = 1.0;
    v.head(kNumParticles)[5] = 1.75;  v.tail(kNumParticles)[5] = 0.0;
    v.head(kNumParticles)[6] = -1.5;  v.tail(kNumParticles)[6] = 0.0;

    state->get_mutable_continuous_state().
        get_mutable_generalized_position().SetFromVector(q);
    state->get_mutable_continuous_state().
        get_mutable_generalized_velocity().SetFromVector(v);
  }

  void DoCalcTimeDerivatives(const Context<double>& context,
                             ContinuousState<double>* deriv) const override {
    const VectorBase<double>& q = context.get_continuous_state().
        get_generalized_position();
    const VectorBase<double>& v = context.get_continuous_state().
        get_generalized_velocity();

    // Get the positions of each particle. Copy q to an Eigen vector once
    // (the original code called CopyToVector() twice).
    const VectorX<double> q_vec = q.CopyToVector();
    const VectorX<double> x = q_vec.head(kNumParticles);
    const VectorX<double> y = q_vec.tail(kNumParticles);

    // Set the derivatives of the positions.
    deriv->get_mutable_generalized_position().SetFrom(v);

    // Compute the time derivatives of the velocities: the (gravitational)
    // forces acting on each particle divided by the mass of each particle.
    VectorX<double> vdot(v.size());
    for (int i = 0; i < kNumParticles; ++i) {
      Vector2d Fi(0.0, 0.0);

      // Accumulate the net forces due to inverse-square law gravitational
      // interaction with every other particle. See (II.6.5) in plei.pdf.
      for (int j = 0; j < kNumParticles; ++j) {
        if (i == j) continue;
        const Vector2d rij(x[j] - x[i], y[j] - y[i]);
        const double distance = rij.norm();
        Fi += G() * (mass_[i] * mass_[j]) * rij /
            (distance * distance * distance);
      }

      vdot.head(kNumParticles)[i] = Fi[0] / mass_[i];
      vdot.tail(kNumParticles)[i] = Fi[1] / mass_[i];
    }

    // Write the accelerations once, after vdot is fully populated. (The
    // previous code wrote the partially-initialized vector on every loop
    // iteration, transiently copying uninitialized entries.)
    deriv->get_mutable_generalized_velocity().SetFromVector(vdot);
  }

  // Gets the end time for integration (to be consistent with plei.pdf).
  double get_end_time() const { return 3.0; }

  // Non-physical inverse-square law gravitational constant (defined in
  // plei.pdf).
  double G() const { return 1.0; }

  // Gets the system solution *for the positions only*. Only valid for
  // time=3.0. Solutions are provided to sixteen decimal digits in plei.pdf.
  static VectorX<double> GetSolution(double t) {
    DRAKE_DEMAND(t == 3.0);
    VectorX<double> sol(kNumParticles * 2);
    sol(0) = 0.3706139143970502;
    sol(1) = 0.3237284092057233 * 10.0;
    sol(2) = -0.3222559032418324 * 10.0;
    sol(3) = 0.6597091455775310;
    sol(4) = 0.3425581707156584;
    sol(5) = 0.1562172101400631 * 10.0;
    sol(6) = -0.7003092922212495;
    sol(7) = -0.3943437585517392 * 10.0;
    sol(8) = -0.3271380973972550 * 10.0;
    sol(9) = 0.5225081843456543 * 10.0;
    sol(10) = -0.2590612434977470 * 10.0;
    sol(11) = 0.1198213693392275 * 10.0;
    sol(12) = -0.2429682344935824;
    sol(13) = 0.1091449240428980 * 10.0;
    return sol;
  }

 private:
  // Mass of each particle; particle i has mass i + 1 (see constructor).
  double mass_[kNumParticles];
};
} // namespace test
} // namespace analysis
} // namespace systems
} // namespace drake
| 0 |
/home/johnshepherd/drake/systems/analysis | /home/johnshepherd/drake/systems/analysis/test_utilities/stationary_system.cc | #include "drake/systems/analysis/test_utilities/stationary_system.h"
#include "drake/systems/framework/context.h"
#include "drake/systems/framework/continuous_state.h"
#include "drake/systems/framework/leaf_system.h"
#include "drake/systems/framework/system_type_tag.h"
namespace drake {
namespace systems {
namespace analysis_test {
// Declares a system with one generalized position, one generalized velocity,
// and no miscellaneous state. The SystemTypeTag registers this system for
// scalar conversion (instantiated below on the default scalars).
template <class T>
StationarySystem<T>::StationarySystem()
    : LeafSystem<T>(SystemTypeTag<StationarySystem>{}) {
  this->DeclareContinuousState(1 /* num q */, 1 /* num v */, 0 /* num z */);
}
template <class T>
void StationarySystem<T>::DoCalcTimeDerivatives(
    const Context<T>&, ContinuousState<T>* derivatives) const {
  // The system is stationary: every state derivative is identically zero.
  auto& xdot = derivatives->get_mutable_vector();
  xdot.SetAtIndex(0, T(0.0));
  xdot.SetAtIndex(1, T(0.0));
}
} // namespace analysis_test
} // namespace systems
} // namespace drake
DRAKE_DEFINE_CLASS_TEMPLATE_INSTANTIATIONS_ON_DEFAULT_SCALARS(
class drake::systems::analysis_test::StationarySystem)
| 0 |
/home/johnshepherd/drake/systems/analysis | /home/johnshepherd/drake/systems/analysis/test_utilities/controlled_spring_mass_system.cc | #include "drake/systems/analysis/test_utilities/controlled_spring_mass_system.h"
#include "drake/common/eigen_types.h"
#include "drake/systems/framework/diagram_builder.h"
#include "drake/systems/primitives/demultiplexer.h"
#include "drake/systems/primitives/multiplexer.h"
using std::make_unique;
namespace drake {
namespace systems {
template <typename T>
PidControlledSpringMassSystem<T>::PidControlledSpringMassSystem(
    double spring_stiffness, double mass,
    double Kp, double Ki, double Kd,
    const T& target_position) : Diagram<T>() {
  // Physical parameters and controller gains must all be non-negative.
  DRAKE_ASSERT(spring_stiffness >= 0);
  DRAKE_ASSERT(mass >= 0);
  DRAKE_ASSERT(Kp >= 0);
  DRAKE_ASSERT(Ki >= 0);
  DRAKE_ASSERT(Kd >= 0);
  DiagramBuilder<T> builder;
  // The plant is a *forced* spring-mass system so the controller can drive it
  // through its external-force input port.
  plant_ = builder.template
      AddSystem<SpringMassSystem>(spring_stiffness, mass, true /* is forced */);
  plant_->set_name("plant");
  // A single-DOF PID controller with the supplied scalar gains.
  controller_ = builder.template AddSystem<controllers::PidController>(
      VectorX<double>::Constant(1, Kp), VectorX<double>::Constant(1, Ki),
      VectorX<double>::Constant(1, Kd));
  controller_->set_name("controller");
  // The desired state is the target position paired with zero velocity.
  VectorX<T> desired(2);
  desired << target_position, 0;
  target_ = builder.template AddSystem<ConstantVectorSource>(desired);
  target_->set_name("target");
  // A demultiplexer is used to split the output from the spring-mass system
  // into three ports. One port with the mass position and another port with the
  // mass velocity so that they can be connected to the controller.
  // The third output from the demultiplexer is the spring-mass system's energy
  // and it is left unconnected.
  auto demux = builder.template AddSystem<Demultiplexer>(3);
  demux->set_name("demux");
  auto mux = builder.template AddSystem<Multiplexer>(2);
  mux->set_name("mux");
  builder.Connect(plant_->get_output_port(),
                  demux->get_input_port(0));
  // Recombine position and velocity into the [x, v] estimated-state vector.
  builder.Connect(demux->get_output_port(0),
                  mux->get_input_port(0));
  builder.Connect(demux->get_output_port(1),
                  mux->get_input_port(1));
  // Connects the estimated state to PID.
  builder.Connect(mux->get_output_port(0),
                  controller_->get_input_port_estimated_state());
  // Connects the desired state to PID.
  builder.Connect(target_->get_output_port(),
                  controller_->get_input_port_desired_state());
  // Closes the feedback loop.
  builder.Connect(controller_->get_output_port_control(),
                  plant_->get_force_port());
  // The output to this system is the output of the spring-mass system which
  // consists of a vector with position, velocity and energy.
  builder.ExportOutput(plant_->get_output_port());
  builder.BuildInto(this);
}
template <typename T>
T PidControlledSpringMassSystem<T>::get_position(
    const Context<T>& context) const {
  // Delegate to the plant, addressing its subsystem context in this Diagram.
  return plant_->get_position(
      Diagram<T>::GetSubsystemContext(*plant_, context));
}
template <typename T>
T PidControlledSpringMassSystem<T>::get_velocity(
    const Context<T>& context) const {
  // Delegate to the plant, addressing its subsystem context in this Diagram.
  return plant_->get_velocity(
      Diagram<T>::GetSubsystemContext(*plant_, context));
}
template <typename T>
T PidControlledSpringMassSystem<T>::get_conservative_work(
    const Context<T>& context) const {
  // Delegate to the plant, addressing its subsystem context in this Diagram.
  return plant_->get_conservative_work(
      Diagram<T>::GetSubsystemContext(*plant_, context));
}
template <typename T>
void PidControlledSpringMassSystem<T>::set_position(
    Context<T>* context, const T& position) const {
  // Forward to the plant, using its mutable subsystem context in this Diagram.
  plant_->set_position(
      &Diagram<T>::GetMutableSubsystemContext(*plant_, context), position);
}
template <typename T>
void PidControlledSpringMassSystem<T>::set_velocity(
    Context<T>* context, const T& velocity) const {
  // Forward to the plant, using its mutable subsystem context in this Diagram.
  plant_->set_velocity(
      &Diagram<T>::GetMutableSubsystemContext(*plant_, context), velocity);
}
template <typename T>
const SpringMassSystem<T>& PidControlledSpringMassSystem<T>::get_plant()
    const {
  // The plant is owned by the Diagram; this accessor merely exposes it.
  return *plant_;
}
} // namespace systems
} // namespace drake
DRAKE_DEFINE_CLASS_TEMPLATE_INSTANTIATIONS_ON_DEFAULT_NONSYMBOLIC_SCALARS(
class drake::systems::PidControlledSpringMassSystem)
| 0 |
/home/johnshepherd/drake/systems/analysis | /home/johnshepherd/drake/systems/analysis/test_utilities/quintic_scalar_system.h | #pragma once
#include "drake/systems/framework/context.h"
#include "drake/systems/framework/leaf_system.h"
#include "drake/systems/framework/state.h"
namespace drake {
namespace systems {
namespace analysis_test {
/// System where the state at (scalar) time t corresponds to the quintic
/// equation t⁵ + 2t⁴ + 3t³ + 4t² + 5t + 6.
class QuinticScalarSystem : public LeafSystem<double> {
 public:
  // Declares a single continuous state variable (the quintic's value).
  QuinticScalarSystem() { this->DeclareContinuousState(1); }
  /// Evaluates the system at time t.
  double Evaluate(double t) const {
    // Horner form of t⁵ + 2t⁴ + 3t³ + 4t² + 5t + 6.
    return t * (t * (t * (t * (t + 2) + 3) + 4) + 5) + 6;
  }
 private:
  // Initializes the state to the quintic's value at t = 0 (i.e., 6).
  void SetDefaultState(const Context<double>& context,
                       State<double>* state) const final {
    const double t0 = 0.0;
    state->get_mutable_continuous_state().get_mutable_vector()[0] =
        Evaluate(t0);
  }
  // Sets the state derivative to the quintic's analytic time derivative,
  // 5t⁴ + 8t³ + 9t² + 8t + 5, in Horner form.
  void DoCalcTimeDerivatives(const Context<double>& context,
                             ContinuousState<double>* deriv) const override {
    const double t = context.get_time();
    (*deriv)[0] = t * (t * (t * (5*t + 8) + 9) + 8) + 5;
  }
};
} // namespace analysis_test
} // namespace systems
} // namespace drake
| 0 |
/home/johnshepherd/drake/systems/analysis | /home/johnshepherd/drake/systems/analysis/test_utilities/spring_mass_damper_system.h | #pragma once
#include <limits>
#include <utility>
#include "drake/systems/analysis/test_utilities/spring_mass_system.h"
// WARNING WARNING WARNING
// This test is currently used only as a stiff system test for implicit
// integration.
// TODO(edrumwri): This test should be upgraded to a reusable, closed-form
// benchmark by integrating this class with SpringMassSystem.
// See issue #6146.
namespace drake {
namespace systems {
namespace implicit_integrator_test {
// This is an unforced spring-mass-damper system.
template <class T>
class SpringMassDamperSystem : public SpringMassSystem<T> {
 public:
  DRAKE_NO_COPY_NO_MOVE_NO_ASSIGN(SpringMassDamperSystem);

  /// Constructs an unforced spring-mass-damper system.
  /// Subclasses must use the protected constructor, not this one.
  SpringMassDamperSystem(double spring_constant_N_per_m,
                         double damping_constant_Ns_per_m,
                         double mass_kg)
      : SpringMassDamperSystem<T>(
            SystemTypeTag<SpringMassDamperSystem>{},
            spring_constant_N_per_m, damping_constant_Ns_per_m, mass_kg) {}

  /// Scalar-converting copy constructor. See @ref system_scalar_conversion.
  template <typename U>
  explicit SpringMassDamperSystem(const SpringMassDamperSystem<U>& other)
      : SpringMassDamperSystem(
            other.get_spring_constant(),
            other.get_damping_constant(),
            other.get_mass()) {}

  /// Returns the damping constant that was provided at construction in Ns/m.
  double get_damping_constant() const { return damping_constant_Ns_per_m_; }

  /// Returns the closed-form position and velocity solution for the unforced
  /// spring-mass-damper from the given initial conditions *for the case that
  /// the spring-mass-damper is not underdamped*. In other words, this function
  /// requires that `c² - 4⋅m⋅k ≥ 0`, where c is the damping coefficient,
  /// m is the mass, and k is the spring coefficient. Put yet another way,
  /// the damping ratio must be at least one (i.e., ξ = c/(2√(km)) ≥ 1).
  /// @param x0 the position of the spring at time t = 0.
  /// @param v0 the velocity of the spring at time t = 0.
  /// @param tf the time at which to return the position and velocity.
  /// @param[out] xf the position of the spring at time tf, on return.
  /// @param[out] vf the velocity of the spring at time tf, on return.
  /// @throws std::exception if xf or vf is nullptr or the system is
  ///         damped, yet underdamped.
  void GetClosedFormSolution(const T& x0, const T& v0, const T& tf,
                             T* xf, T* vf) const {
    using std::exp;

    if (!xf || !vf)
      throw std::logic_error("Passed final position/velocity is null.");

    // Special case #1: no damping (uses the closed form solution from
    // the mass-spring system).
    if (get_damping_constant() == 0) {
      SpringMassSystem<T>::GetClosedFormSolution(x0, v0, tf, xf, vf);
      return;
    }

    // TODO(mitiguy): Provide solutions to the underdamped system.
    // Special case #2: underdamping.
    if (get_damping_constant() * get_damping_constant() <
        4 * this->get_mass() * this->get_spring_constant()) {
      throw std::logic_error("Closed form solution not available for "
                             "underdamped system.");
    }

    // m⋅d²x/dt² + c⋅dx/dt + kx = 0
    // Solution to this ODE: x(t) = c₁⋅eʳᵗ + c₂⋅eˢᵗ
    // where r and s are the roots to the equation mz² + cz + k = 0.
    // Thus, dx/dt = r⋅c₁⋅eʳᵗ + s⋅c₂⋅eˢᵗ.

    // Step 1: Solve the equation for z, yielding r and s.
    T r, s;
    std::tie(r, s) = SolveRestrictedQuadratic(this->get_mass(),
                                              get_damping_constant(),
                                              this->get_spring_constant());

    // Step 2: Substituting t = 0 into the equations above, solve the resulting
    // linear system:
    // c1 + c2 = x0
    // r⋅c1 + s⋅c2 = v0
    // yielding:
    // c1 = -(-v0 + s⋅x0)/(r - s) and c2 = -(v0 - r⋅x0)/(r - s)
    // BUG FIX: the previous code computed c1 = -(v0 + s⋅x0)/(r - s), which
    // has the wrong sign on v0 and does not satisfy c1 + c2 = x0.
    const T c1 = -(-v0 + s * x0) / (r - s);
    const T c2 = -(v0 - r * x0) / (r - s);

    // Step 3: Set the solutions.
    *xf = c1 * exp(r * tf) + c2 * exp(s * tf);
    *vf = r * c1 * exp(r * tf) + s * c2 * exp(s * tf);
  }

 protected:
  /// Constructor that specifies @ref system_scalar_conversion support.
  SpringMassDamperSystem(SystemScalarConverter converter,
                         double spring_constant_N_per_m,
                         double damping_constant_Ns_per_m,
                         double mass_kg) :
      SpringMassSystem<T>(std::move(converter),
                          spring_constant_N_per_m, mass_kg,
                          false /* unforced */),
      damping_constant_Ns_per_m_(damping_constant_Ns_per_m) {}

  void DoCalcTimeDerivatives(const Context<T>& context,
                             ContinuousState<T>* derivatives) const override {
    // Get the current state of the spring.
    const ContinuousState<T>& state = context.get_continuous_state();

    // First element of the derivative is spring velocity.
    const T xd = state[1];
    (*derivatives)[0] = xd;

    // Compute the force acting on the mass: spring force (about the rest
    // position x0 = 0) plus viscous damping.
    const double k = this->get_spring_constant();
    const double b = get_damping_constant();
    const T x0 = 0;
    const T x = state[0];
    T force = -k * (x - x0) - b * xd;

    // Second element of the derivative is spring acceleration.
    (*derivatives)[1] = force / this->get_mass();

    // Third element of the derivative is the energy added into the spring.
    (*derivatives)[2] = this->CalcConservativePower(context);
  }

 private:
  // Signum function.
  static T sgn(const T& x) {
    if (x > 0) {
      return 1;
    } else {
      if (x < 0) {
        return -1;
      } else {
        return 0;
      }
    }
  }

  // Solves the quadratic equation ax² + bx + c = 0 for x, returned as a pair
  // of two values, assuming that b² >= 4ac, a != 0, and b != 0. Cancellation
  // error is avoided. Aborts if b² < 4ac, a = 0, or b = 0. This restricted
  // quadratic equation solver will work for the test case in this class; *do
  // not trust this code to solve generic quadratic equations*.
  static std::pair<T, T> SolveRestrictedQuadratic(const T& a, const T& b,
                                                  const T& c) {
    using std::sqrt;
    using std::abs;
    const T disc = b*b - 4 * a * c;
    DRAKE_DEMAND(disc >= 0);
    DRAKE_DEMAND(abs(a) > std::numeric_limits<double>::epsilon());
    DRAKE_DEMAND(abs(b) > std::numeric_limits<double>::epsilon());
    // Compute the numerically stable root first, then obtain the other root
    // from the product relation x1·x2 = c/a to avoid cancellation.
    const T x1 = (-b - sgn(b) * sqrt(disc)) / (2 * a);
    const T x2 = c / (a * x1);
    return std::make_pair(x1, x2);
  }

  // Damping constant supplied at construction, in Ns/m.
  double damping_constant_Ns_per_m_;
};
} // namespace implicit_integrator_test
} // namespace systems
} // namespace drake
| 0 |
/home/johnshepherd/drake/systems/analysis | /home/johnshepherd/drake/systems/analysis/test_utilities/stiff_double_mass_spring_system.h | #pragma once
#include <cmath>
#include "drake/systems/framework/leaf_system.h"
namespace drake {
namespace systems {
namespace analysis {
namespace test {
/// A coupled, mass spring system taken from the SD/FAST user's manual.
/// This simple example is used to provide a stiff system for testing
/// implicit integration. Mass cannot be set, nor can spring and damping
/// constants be set.
///
/// The system of ODEs for the double spring mass system follows:<pre>
/// ẍ₁ = (f₁ - f₂)/m₁
/// ẍ₂ = f₂/m₂
/// </pre>
/// where <pre>
/// f₁ = -k₁x₁ - b₁ẋ₁
/// f₂ = -k₂(x₂ - x₁ - 1) - b₂(ẋ₂ - ẋ₁)
/// </pre>
/// and f₁ and f₂ are the spring and damper forces acting between the
/// world/the first mass and the first and second masses, respectively, k are
/// spring constants, b are damping constants, and m are masses. Note
/// that the resting position of the system is at x₁ = 0, x₂ = 1, ẋ₁ = ẋ₂ = 0.
///
/// This system uses m₁ = m₂ and an extremely stiff spring between the two
/// masses (1e20 kg/s²) in order to approximate the masses being rigidly
/// connected. The other spring, which connects the first mass to "the world",
/// uses a much smaller stiffness of 750 kg/s². Damping is currently set to
/// b₁ = b₂ = 0 (i.e., disabled).
template <class T>
class StiffDoubleMassSpringSystem : public LeafSystem<T> {
 public:
  DRAKE_NO_COPY_NO_MOVE_NO_ASSIGN(StiffDoubleMassSpringSystem)
  // Declares two generalized positions and two generalized velocities (one
  // pair per point mass).
  StiffDoubleMassSpringSystem() {
    this->DeclareContinuousState(2 /* num_q */, 2 /* num_v */, 0 /* num_z */);
  }
  /// Evaluates the spring and damping forces, returning the forces on each
  /// body.
  Vector2<T> EvalSpringDamperForces(const Context<T>& context) const {
    const Eigen::Vector2d k = get_spring_constants();
    const Eigen::Vector2d b = get_damping_constants();
    const Vector2<T> x = get_position(context);
    const Vector2<T> v = get_velocity(context);
    // Spring 1 is at rest when x₁ = 0; Spring 2 is at rest when the bodies
    // are separated by exactly 1 (x₂ - x₁ = 1).
    const T stretch0 = x(0) - 0;
    const T stretch1 = x(1) - x(0) - 1;
    // Get the force from Spring 1 and Spring 2.
    const T f1 = -k(0) * stretch0 - v(0) * b(0);
    const T f2 = -k(1) * stretch1 - (v(1) - v(0)) * b(1);
    // Return the force on each body. Spring 1 acts only on Body 1. Spring 2
    // acts on Body 1 (reaction) and Body 2.
    return Vector2<T>(f1 - f2, f2);
  }
  /// Gets the two spring constants: k₁ = 750 (world to Body 1) and k₂ = 1e20
  /// (the extremely stiff coupling between the two bodies).
  Eigen::Vector2d get_spring_constants() const {
    return
        Eigen::Vector2d(750, 1e20);
  }
  /// Gets the two damping constants (damping is currently disabled).
  Eigen::Vector2d get_damping_constants() const {
    return
        Eigen::Vector2d(0, 0);
  }
  /// Gets the positions of the two point mass bodies.
  Vector2<T> get_position(const Context<T>& c) const {
    return
        c.get_continuous_state().get_generalized_position().CopyToVector();
  }
  /// Gets the velocity of the two point mass bodies.
  Vector2<T> get_velocity(const Context<T>& c) const {
    return
        c.get_continuous_state().get_generalized_velocity().CopyToVector();
  }
  /// Gets the mass for the bodies in the system.
  Eigen::Vector2d get_mass() const { return Eigen::Vector2d(1.0, 1.0); }
  void DoCalcTimeDerivatives(const Context<T>& context,
                             ContinuousState<T>* deriv) const override {
    // Get velocity.
    const VectorBase<T>& xd = context.get_continuous_state().
        get_generalized_velocity();
    // Get the masses and spring and damping coefficients.
    const Vector2<T> mass = get_mass();
    // Compute the forces.
    const Vector2<T> f = EvalSpringDamperForces(context);
    // Compute the acceleration (elementwise a = f/m).
    const Vector2<T> a = f.array() / mass.array();
    // Set the derivatives: q̇ = v, v̇ = a.
    deriv->get_mutable_generalized_position().SetFrom(xd);
    deriv->get_mutable_generalized_velocity().SetFromVector(a);
  }
  /// Gets the end time for integration.
  T get_end_time() const { return 1e1; }
  /// Sets the initial conditions for the system.
  /// The first mass will be located at x1 = 0.5 and second will be located at
  /// x2 = 1.5. No initial velocity is present.
  void SetDefaultState(const Context<T>&,
                       State<T>* state) const override {
    Vector2<T> x, xd;
    x(0) = 0.5;
    x(1) = 1.5;
    xd.setZero();
    state->get_mutable_continuous_state().get_mutable_generalized_position().
        SetFromVector(x);
    state->get_mutable_continuous_state().get_mutable_generalized_velocity().
        SetFromVector(xd);
  }
  /// Gets the solution for the system with initial state defined at @p context,
  /// returning the solution at time @p t, in @p state. Aside from the
  /// assumption that there is zero initial stretching between the two point
  /// masses, initial conditions are arbitrary. The solution is predicated
  /// on zero damping (aborts if this is not true).
  void GetSolution(const Context<T>& context, const T& t,
                   ContinuousState<T>* state) const {
    const Eigen::Vector2d b = get_damping_constants();
    DRAKE_DEMAND(b[0] == b[1] && b[0] == 0.0);
    using std::cos;
    using std::sin;
    // Get the offset between the two bodies.
    // NOTE(review): the offset is read from the *output* @p state, so callers
    // apparently must pass `state` pre-populated with the current state —
    // TODO confirm against call sites.
    const T offset = state->get_generalized_position().GetAtIndex(1) -
        state->get_generalized_position().GetAtIndex(0);
    // Omega will use the first body (the one connected to the "world" with the
    // non-stiff spring). The combined mass is used, treating the stiff spring
    // as a rigid coupling of the two bodies.
    const double omega = std::sqrt(get_spring_constants()(0) /
        (get_mass()(0) + get_mass()(1)));
    // Setup c1 and c2 for ODE constants.
    const double c1 = context.get_continuous_state().
        get_generalized_position().GetAtIndex(0);
    const double c2 = context.get_continuous_state().
        get_generalized_velocity().GetAtIndex(0) / omega;
    // Set the position and velocity of the first body using the ODE solution.
    const double x1_final = c1 * cos(omega * t) + c2 * sin(omega * t);
    const double v1_final = c1 * -sin(omega * t) * omega +
        c2 * +cos(omega * t) * omega;
    state->get_mutable_generalized_position().SetAtIndex(0, x1_final);
    state->get_mutable_generalized_velocity().SetAtIndex(0, v1_final);
    // The position of the second body should be offset exactly from the first.
    state->get_mutable_generalized_position().SetAtIndex(1, x1_final + offset);
    // Velocity of the second body should be equal to that of the first body.
    state->get_mutable_generalized_velocity().SetAtIndex(1, v1_final);
  }
};
} // namespace test
} // namespace analysis
} // namespace systems
} // namespace drake
| 0 |
/home/johnshepherd/drake/systems/analysis | /home/johnshepherd/drake/systems/analysis/test_utilities/controlled_spring_mass_system.h | #pragma once
#include <memory>
#include "drake/common/default_scalars.h"
#include "drake/common/drake_copyable.h"
#include "drake/systems/analysis/test_utilities/spring_mass_system.h"
#include "drake/systems/controllers/pid_controller.h"
#include "drake/systems/framework/diagram.h"
#include "drake/systems/primitives/constant_vector_source.h"
namespace drake {
namespace systems {
/// A model of a one-dimensional spring-mass system controlled to achieve a
/// given target position using a PID controller.
/// @see SpringMassSystem, PidController.
///
/// @tparam_nonsymbolic_scalar
/// @ingroup rigid_body_systems
template <typename T>
class PidControlledSpringMassSystem : public Diagram<T> {
 public:
  DRAKE_NO_COPY_NO_MOVE_NO_ASSIGN(PidControlledSpringMassSystem)
  /// Constructs a spring-mass system with a fixed spring constant and given
  /// mass controlled by a PID controller to achieve a specified target
  /// position.
  /// @param[in] spring_stiffness The spring constant.
  /// @param[in] mass The value of the mass attached to the spring.
  /// @param[in] Kp the proportional constant.
  /// @param[in] Ki the integral constant.
  /// @param[in] Kd the derivative constant.
  /// @param[in] target_position the desired target position.
  PidControlledSpringMassSystem(double spring_stiffness, double mass,
                                double Kp, double Ki, double Kd,
                                const T& target_position);
  ~PidControlledSpringMassSystem() override {}
  /// Returns the position of the mass, read from the plant's subcontext
  /// within the given Context.
  T get_position(const Context<T>& context) const;
  /// Returns the velocity of the mass, read from the plant's subcontext
  /// within the given Context.
  T get_velocity(const Context<T>& context) const;
  /// Returns the plant's conservative work, read from the plant's subcontext
  /// within the given Context.
  T get_conservative_work(const Context<T>& context) const;
  /// Sets the position of the mass in the given Context.
  void set_position(Context<T>* context, const T& position) const;
  /// Sets the velocity of the mass in the given Context.
  void set_velocity(Context<T>* context, const T& position) const;
  /// Returns the SpringMassSystem plant of the model.
  const SpringMassSystem<T>& get_plant() const;
 private:
  // These are references into the Diagram; no ownership implied.
  SpringMassSystem<T>* plant_;
  controllers::PidController<T>* controller_;
  ConstantVectorSource<T>* target_;
};
} // namespace systems
} // namespace drake
DRAKE_DECLARE_CLASS_TEMPLATE_INSTANTIATIONS_ON_DEFAULT_NONSYMBOLIC_SCALARS(
class drake::systems::PidControlledSpringMassSystem)
| 0 |
/home/johnshepherd/drake/systems/analysis | /home/johnshepherd/drake/systems/analysis/test_utilities/spring_mass_system.h | #pragma once
#include <cmath>
#include <stdexcept>
#include "drake/common/drake_copyable.h"
#include "drake/systems/framework/basic_vector.h"
#include "drake/systems/framework/leaf_system.h"
namespace drake {
namespace systems {
/// The state of a one-dimensional spring-mass system, consisting of the
/// position and velocity of the mass, in meters and meters/s.
///
/// @tparam_default_scalar
template <typename T>
class SpringMassStateVector : public BasicVector<T> {
 public:
  DRAKE_NO_COPY_NO_MOVE_NO_ASSIGN(SpringMassStateVector)
  /// @param initial_position The position of the mass in meters.
  /// @param initial_velocity The velocity of the mass in meters / second.
  SpringMassStateVector(const T& initial_position, const T& initial_velocity);
  /// Creates a state with position and velocity set to zero.
  SpringMassStateVector();
  ~SpringMassStateVector() override;
  /// Returns the position of the mass in meters, where zero is the point
  /// where the spring exerts no force.
  T get_position() const;
  /// Sets the position of the mass in meters.
  void set_position(const T& q);
  /// Returns the velocity of the mass in meters per second.
  T get_velocity() const;
  /// Sets the velocity of the mass in meters per second.
  void set_velocity(const T& v);
  /// Returns the integral of conservative power, in watts.
  T get_conservative_work() const;
  /// Initialize the conservative work integral to a given value.
  void set_conservative_work(const T& e);
 private:
  // NVI implementation of BasicVector cloning; returns a covariant copy of
  // this state vector.
  [[nodiscard]] SpringMassStateVector<T>* DoClone() const override;
};
/// A model of a one-dimensional spring-mass system.
///
/// @verbatim
/// |-----\/\/ k /\/\----( m ) +x
/// @endverbatim
/// Units are MKS (meters-kilograms-seconds).
///
/// @tparam_default_scalar
/// @ingroup rigid_body_systems
template <typename T>
class SpringMassSystem : public LeafSystem<T> {
 public:
  DRAKE_NO_COPY_NO_MOVE_NO_ASSIGN(SpringMassSystem)
  /// Constructs a spring-mass system with a fixed spring constant and given
  /// mass. Subclasses must use the protected constructor, not this one.
  /// @param[in] spring_constant_N_per_m The spring constant in N/m.
  /// @param[in] mass_kg The actual value in kg of the mass attached to the
  /// spring. (Fixed doc/parameter name mismatch: was documented as mass_Kg.)
  /// @param[in] system_is_forced If `true`, the system has an input port for an
  /// external force. If `false`, the system has no inputs.
  SpringMassSystem(double spring_constant_N_per_m, double mass_kg,
                   bool system_is_forced = false);
  /// Scalar-converting copy constructor. See @ref system_scalar_conversion.
  template <typename U>
  explicit SpringMassSystem(const SpringMassSystem<U>&);
  // Provide methods specific to this System.
  /// Returns the input port to the externally applied force.
  const InputPort<T>& get_force_port() const;
  /// Returns the spring constant k that was provided at construction, in N/m.
  double get_spring_constant() const { return spring_constant_N_per_m_; }
  /// Returns the mass m that was provided at construction, in kg.
  double get_mass() const { return mass_kg_; }
  /// Returns true iff the system is forced.
  bool get_system_is_forced() const { return system_is_forced_; }
  /// Gets the current position of the mass in the given Context.
  T get_position(const Context<T>& context) const {
    return get_state(context).get_position();
  }
  /// Gets the current velocity of the mass in the given Context.
  T get_velocity(const Context<T>& context) const {
    return get_state(context).get_velocity();
  }
  /// @returns the external driving force to the system, or zero if the system
  /// was constructed unforced.
  T get_input_force(const Context<T>& context) const {
    T external_force = 0;
    // The force input port exists iff the system was constructed as forced.
    DRAKE_ASSERT(system_is_forced_ == (context.num_input_ports() == 1));
    if (system_is_forced_) {
      external_force = get_force_port().Eval(context)[0];
    }
    return external_force;
  }
  /// Gets the current value of the conservative power integral in the given
  /// Context.
  T get_conservative_work(const Context<T>& context) const {
    return get_state(context).get_conservative_work();
  }
  /// Sets the position of the mass in the given Context.
  void set_position(Context<T>* context, const T& position) const {
    get_mutable_state(context).set_position(position);
  }
  /// Sets the velocity of the mass in the given Context.
  void set_velocity(Context<T>* context, const T& velocity) const {
    get_mutable_state(context).set_velocity(velocity);
  }
  /// Sets the initial value of the conservative power integral in the given
  /// Context.
  void set_conservative_work(Context<T>* context, const T& energy) const {
    get_mutable_state(context).set_conservative_work(energy);
  }
  /// Returns the force being applied by the spring to the mass in the given
  /// Context. This force f is given by `f = -k (x-x0)`; the spring applies the
  /// opposite force -f to the world attachment point at the other end. The
  /// force is in newtons N (kg-m/s^2).
  T EvalSpringForce(const Context<T>& context) const;
  /// Returns the potential energy currently stored in the spring in the given
  /// Context. For this linear spring, `pe = k (x-x0)^2 / 2`, so that spring
  /// force `f = -k (x-x0)` is the negative gradient of pe. The rate of change
  /// of potential energy (that is, power being added to potential energy) is
  /// @verbatim
  ///    power_pe = d/dt pe
  ///             = k (x-x0) v
  ///             = -f v.
  /// @endverbatim
  /// Energy is in joules J (N-m).
  T DoCalcPotentialEnergy(const Context<T>& context) const override;
  /// Returns the current kinetic energy of the moving mass in the given
  /// Context. This is `ke = m v^2 / 2` for this system. The rate of change of
  /// kinetic energy (that is, power being added to kinetic energy) is
  /// @verbatim
  ///    power_ke = d/dt ke
  ///             = m v a
  ///             = m v (f/m)
  ///             = f v
  ///             = -power_pe
  /// @endverbatim
  /// (assuming the only force is due to the spring). Energy is in joules.
  /// @see EvalSpringForce(), EvalPotentialEnergy()
  T DoCalcKineticEnergy(const Context<T>& context) const override;
  /// Returns the rate at which mechanical energy is being converted from
  /// potential energy in the spring to kinetic energy of the mass by this
  /// spring-mass system in the given Context. For this
  /// system, we have conservative power @verbatim
  ///    power_c = f v
  ///            = power_ke
  ///            = -power_pe
  /// @endverbatim
  /// This quantity is positive when the spring is accelerating the mass and
  /// negative when the spring is decelerating the mass.
  T DoCalcConservativePower(const Context<T>& context) const override;
  // TODO(sherm1) Currently this is a conservative system so there is no power
  // generated or consumed. Add some kind of dissipation and/or actuation to
  // make this more interesting. Russ suggests adding an Input which is a
  // horizontal control force on the mass.
  /// Returns power that doesn't involve the conservative spring element. (There
  /// is none in this system.)
  T DoCalcNonConservativePower(const Context<T>& context) const override;
  void DoCalcTimeDerivatives(const Context<T>& context,
                             ContinuousState<T>* derivatives) const override;
  /// Returns the closed-form position and velocity solution for this system
  /// from the given initial conditions.
  /// @param x0 the position of the spring at time t = 0.
  /// @param v0 the velocity of the spring at time t = 0.
  /// @param tf the time at which to return the position and velocity.
  /// @param[out] xf the position of the spring at time tf, on return.
  /// @param[out] vf the velocity of the spring at time tf, on return.
  /// @throws std::exception if xf or vf is nullptr or if the system is
  /// forced.
  /// @note NOTE(review): if the spring constant is zero, omega below is zero
  ///   and `c2 = v0 / omega` divides by zero; this degenerate case is not
  ///   guarded here — confirm callers never request it with k = 0.
  void GetClosedFormSolution(const T& x0, const T& v0, const T& tf,
                             T* xf, T* vf) const {
    using std::sqrt;
    using std::sin;
    using std::cos;
    if (!xf || !vf)
      throw std::logic_error("Passed final position/velocity is null.");
    if (system_is_forced_)
      throw std::logic_error("Can only compute closed form solution on "
                                 "unforced system");
    // d^2x/dt^2 = -kx/m
    // solution to this ODE: x(t) = c1*cos(omega*t) + c2*sin(omega*t)
    // where omega = sqrt(k/m)
    // ẋ(t) = -c1*sin(omega*t)*omega + c2*cos(omega*t)*omega
    // for t = 0, x(0) = c1, ẋ(0) = c2*omega
    // Setup c1 and c2 for ODE constants.
    const T omega = sqrt(get_spring_constant() / get_mass());
    const T c1 = x0;
    const T c2 = v0 / omega;
    *xf = c1*cos(omega*tf) + c2*sin(omega*tf);
    *vf = -c1*sin(omega*tf)*omega + c2*cos(omega*tf)*omega;
  }
 protected:
  /// Constructor that specifies @ref system_scalar_conversion support.
  SpringMassSystem(
      SystemScalarConverter converter,
      double spring_constant_N_per_m,
      double mass_kg,
      bool system_is_forced);
 private:
  // This is the calculator method for the output port.
  void SetOutputValues(const Context<T>& context,
                       SpringMassStateVector<T>* output) const;
  // TODO(david-german-tri): Add a cast that is dynamic_cast in Debug mode,
  // and static_cast in Release mode.
  static const SpringMassStateVector<T>& get_state(
      const ContinuousState<T>& cstate) {
    return dynamic_cast<const SpringMassStateVector<T>&>(cstate.get_vector());
  }
  static SpringMassStateVector<T>& get_mutable_state(
      ContinuousState<T>* cstate) {
    return dynamic_cast<SpringMassStateVector<T>&>(
        cstate->get_mutable_vector());
  }
  static const SpringMassStateVector<T>& get_state(const Context<T>& context) {
    return get_state(context.get_continuous_state());
  }
  static SpringMassStateVector<T>& get_mutable_state(Context<T>* context) {
    return get_mutable_state(&context->get_mutable_continuous_state());
  }
  const double spring_constant_N_per_m_{};  // k, in N/m.
  const double mass_kg_{};                  // m, in kg.
  const bool system_is_forced_{false};  // Whether a force input port exists.
};
} // namespace systems
} // namespace drake
| 0 |
/home/johnshepherd/drake/systems/analysis | /home/johnshepherd/drake/systems/analysis/test_utilities/cubic_scalar_system.h | #pragma once
#include "drake/systems/framework/context.h"
#include "drake/systems/framework/leaf_system.h"
#include "drake/systems/framework/state.h"
namespace drake {
namespace systems {
namespace analysis_test {
/// System where the state at (scalar) time t corresponds to the cubic equation
/// t³ + t² + 12t + 5.
class CubicScalarSystem : public LeafSystem<double> {
 public:
  // A single continuous state variable holds the cubic's value.
  CubicScalarSystem() { this->DeclareContinuousState(1); }

  /// Returns the closed-form value t³ + t² + 12t + 5 at time t (evaluated in
  /// Horner form).
  double Evaluate(double t) const {
    const double horner_value = 5 + t * (t * (t + 1) + 12);
    return horner_value;
  }

 private:
  // Seeds the sole state variable with the cubic evaluated at t = 0.
  void SetDefaultState(
      const Context<double>& context, State<double>* state) const final {
    const double kStartTime = 0.0;
    auto& xc = state->get_mutable_continuous_state().get_mutable_vector();
    xc[0] = Evaluate(kStartTime);
  }

  // Time derivative is the analytic derivative of the cubic: 3t² + 2t + 12.
  void DoCalcTimeDerivatives(
      const Context<double>& context,
      ContinuousState<double>* deriv) const override {
    const double now = context.get_time();
    const double slope = 3 * now * now + 2 * now + 12;
    (*deriv)[0] = slope;
  }
};
} // namespace analysis_test
} // namespace systems
} // namespace drake
| 0 |
/home/johnshepherd/drake/systems/analysis | /home/johnshepherd/drake/systems/analysis/test_utilities/discontinuous_spring_mass_damper_system.h | #pragma once
#include "drake/systems/analysis/test_utilities/spring_mass_damper_system.h"
// WARNING WARNING WARNING
// This test is currently used only as a stiff system test for implicit
// integration.
// TODO(edrumwri): This test should be upgraded to a reusable, closed-form
// benchmark by integrating this class with SpringMassSystem.
// See issue #6146.
namespace drake {
namespace systems {
namespace implicit_integrator_test {
// This is a modified spring-mass-damper system for which the acceleration
// component of the derivative function is discontinuous with respect to the
// point mass state. A force of constant magnitude is applied to the
// spring-mass-damper. Tests the ability of an integrator to deal with
// such discontinuities.
template <class T>
class DiscontinuousSpringMassDamperSystem final
    : public SpringMassDamperSystem<T> {
 public:
  DRAKE_NO_COPY_NO_MOVE_NO_ASSIGN(DiscontinuousSpringMassDamperSystem);

  DiscontinuousSpringMassDamperSystem(double spring_constant_N_per_m,
                                      double damping_constant_Ns_per_m,
                                      double mass_kg,
                                      double constant_force)
      : SpringMassDamperSystem<T>(
            SystemTypeTag<DiscontinuousSpringMassDamperSystem>{},
            spring_constant_N_per_m,
            damping_constant_Ns_per_m,
            mass_kg),
        constant_force_(constant_force) {
    DRAKE_ASSERT(constant_force >= 0.0);
  }

  /// Scalar-converting copy constructor. See @ref system_scalar_conversion.
  template <typename U>
  explicit DiscontinuousSpringMassDamperSystem(
      const DiscontinuousSpringMassDamperSystem<U>& other)
      : DiscontinuousSpringMassDamperSystem(other.get_spring_constant(),
                                            other.get_damping_constant(),
                                            other.get_mass(),
                                            other.get_constant_force()) {}

  /// Gets the magnitude of the constant force acting on the system.
  double get_constant_force() const { return constant_force_; }

 protected:
  void DoCalcTimeDerivatives(const Context<T>& context,
                             ContinuousState<T>* derivatives) const override {
    // Read the current spring state out of the context.
    const ContinuousState<T>& state = context.get_continuous_state();
    // The position's rate of change is simply the velocity state.
    const T velocity = state[1];
    (*derivatives)[0] = velocity;
    // A constant-magnitude force always pushes the mass toward -inf. The
    // spring and damper engage only when the mass is at or left of the
    // spring setpoint x = 0, which is what makes the acceleration
    // discontinuous in the state.
    T net_force = -constant_force_;
    const double stiffness = this->get_spring_constant();
    const double damping = this->get_damping_constant();
    const T setpoint = 0;
    const T position = state[0];
    if (position <= setpoint)
      net_force -= stiffness * (position - setpoint) + damping * velocity;
    // Newton's second law yields the acceleration.
    (*derivatives)[1] = net_force / this->get_mass();
    // Track the conservative power flowing into the spring.
    (*derivatives)[2] = this->CalcConservativePower(context);
  }

 private:
  double constant_force_;
};
} // namespace implicit_integrator_test
} // namespace systems
} // namespace drake
| 0 |
/home/johnshepherd/drake/systems/analysis | /home/johnshepherd/drake/systems/analysis/test_utilities/quadratic_scalar_system.h | #pragma once
#include "drake/systems/framework/context.h"
#include "drake/systems/framework/leaf_system.h"
#include "drake/systems/framework/state.h"
namespace drake {
namespace systems {
namespace analysis_test {
/// System where the state at (scalar) time t corresponds to the quadratic
/// equation St² + St + 3, where S is a user-defined Scalar (4 by default).
class QuadraticScalarSystem : public LeafSystem<double> {
 public:
  // S is the user-selectable scale factor of the quadratic (default 4).
  explicit QuadraticScalarSystem(double S = 4) : S_(S) {
    this->DeclareContinuousState(1);
  }

  /// Returns the closed-form value S·t² + S·t + 3 at time t.
  double Evaluate(double t) const {
    const double value = 3 + S_ * t * (t + 1);
    return value;
  }

 private:
  // Seeds the sole state variable with the quadratic evaluated at t = 0.
  void SetDefaultState(
      const Context<double>& context, State<double>* state) const final {
    const double kStartTime = 0.0;
    auto& xc = state->get_mutable_continuous_state().get_mutable_vector();
    xc[0] = Evaluate(kStartTime);
  }

  // Time derivative is the analytic derivative S·(2t + 1).
  void DoCalcTimeDerivatives(
      const Context<double>& context,
      ContinuousState<double>* deriv) const override {
    const double now = context.get_time();
    (*deriv)[0] = S_ * (2 * now + 1);
  }

  // The scaling factor.
  double S_{};
};
} // namespace analysis_test
} // namespace systems
} // namespace drake
| 0 |
/home/johnshepherd/drake/systems/analysis | /home/johnshepherd/drake/systems/analysis/test_utilities/stationary_system.h | #pragma once
#include "drake/common/default_scalars.h"
#include "drake/systems/framework/context.h"
#include "drake/systems/framework/continuous_state.h"
#include "drake/systems/framework/leaf_system.h"
namespace drake {
namespace systems {
namespace analysis_test {
/// System with no state evolution for testing numerical differencing in
/// integrators that use it.
/// @tparam_default_scalar
template <typename T>
class StationarySystem final : public LeafSystem<T> {
 public:
  DRAKE_NO_COPY_NO_MOVE_NO_ASSIGN(StationarySystem)
  StationarySystem();
  /// Scalar-converting copy constructor. See @ref system_scalar_conversion.
  template <typename U>
  explicit StationarySystem(const StationarySystem<U>&) : StationarySystem() {}
 protected:
  // Defined out-of-line (in the .cc); per the class doc the state does not
  // evolve, so presumably this zeroes the derivatives — TODO confirm against
  // the definition, which is not visible here.
  void DoCalcTimeDerivatives(const Context<T>&,
                             ContinuousState<T>* derivatives) const override;
};
} // namespace analysis_test
} // namespace systems
} // namespace drake
DRAKE_DECLARE_CLASS_TEMPLATE_INSTANTIATIONS_ON_DEFAULT_SCALARS(
class drake::systems::analysis_test::StationarySystem)
| 0 |
/home/johnshepherd/drake/systems/analysis | /home/johnshepherd/drake/systems/analysis/test_utilities/explicit_error_controlled_integrator_test.h | #include "drake/common/test_utilities/expect_no_throw.h"
#pragma once
#include <cmath>
#include <limits>
#include <memory>
#include <gtest/gtest.h>
#include "drake/common/unused.h"
#include "drake/systems/analysis/test_utilities/my_spring_mass_system.h"
#include "drake/systems/analysis/test_utilities/pleides_system.h"
namespace drake {
namespace systems {
namespace analysis_test {
// T is the integrator type (e.g., RungeKutta3Integrator<double>).
template <class T>
struct ExplicitErrorControlledIntegratorTest : public ::testing::Test {
 public:
  ExplicitErrorControlledIntegratorTest() {
    // Create a mass-spring-system with update rate=0.
    spring_mass = std::make_unique<analysis_test::MySpringMassSystem<double>>(
        kSpringK, kMass, 0.);
    context = spring_mass->CreateDefaultContext();
    // Create and initialize the integrator.
    integrator = std::make_unique<T>(*spring_mass, context.get());
  }
  // The system under test, its context, and the integrator (of the typed
  // integrator class T) bound to both. `integrator` mutates `context` in
  // place, so the two views stay in sync throughout each test.
  std::unique_ptr<analysis_test::MySpringMassSystem<double>> spring_mass;
  std::unique_ptr<Context<double>> context;
  std::unique_ptr<IntegratorBase<double>> integrator;
  const double kDt = 1e-3;        // Integration step size.
  const double kBigDt = 1e-1;     // Big integration step size.
  const double kSpringK = 300.0;  // N/m
  const double kMass = 2.0;       // kg
};
TYPED_TEST_SUITE_P(ExplicitErrorControlledIntegratorTest);
// Verifies that a requested initial step-size target is stored verbatim.
TYPED_TEST_P(ExplicitErrorControlledIntegratorTest, ReqInitialStepTarget) {
  // Set the requested initial step size.
  this->integrator->request_initial_step_size_target(this->kDt);
  EXPECT_EQ(this->integrator->get_initial_step_size_target(), this->kDt);
}
// Verifies that the integrator's mutable context is the same object as the
// fixture's context (a change through one is visible through the other).
TYPED_TEST_P(ExplicitErrorControlledIntegratorTest, ContextAccess) {
  this->integrator->get_mutable_context()->SetTime(3.);
  EXPECT_EQ(this->integrator->get_context().get_time(), 3.);
  EXPECT_EQ(this->context->get_time(), 3.);
}
// Verifies error estimation is supported.
TYPED_TEST_P(ExplicitErrorControlledIntegratorTest, ErrEstSupport) {
  EXPECT_GE(this->integrator->get_error_estimate_order(), 1);
  EXPECT_EQ(this->integrator->supports_error_estimation(), true);
  // Error-controlled integrators must accept accuracy and initial-step
  // requests without throwing.
  DRAKE_EXPECT_NO_THROW(this->integrator->set_target_accuracy(1e-1));
  DRAKE_EXPECT_NO_THROW(this->integrator->request_initial_step_size_target(
      this->kDt));
}
// Verifies that the stepping works with relatively small
// magnitude step sizes.
TYPED_TEST_P(ExplicitErrorControlledIntegratorTest, MagDisparity) {
  this->context->SetTime(0.0);
  // Set integrator parameters. Note the extreme disparity between the
  // maximum step size (1e-1) and the minimum/final time (1e-40).
  this->integrator->set_maximum_step_size(0.1);
  this->integrator->set_requested_minimum_step_size(1e-40);
  this->integrator->set_target_accuracy(1e-3);
  // Take all the defaults.
  this->integrator->Initialize();
  // Attempt to take a variable step- should not throw an exception.
  DRAKE_EXPECT_NO_THROW(
      this->integrator->IntegrateWithMultipleStepsToTime(1e-40));
}
// Test scaling vectors
TYPED_TEST_P(ExplicitErrorControlledIntegratorTest, Scaling) {
  // Setting maximum integrator step size is necessary to prevent integrator
  // from throwing an exception.
  this->integrator->set_maximum_step_size(this->kBigDt);
  // Initialize the integrator to set weight vector sizes.
  this->integrator->Initialize();
  // Test scaling: both weight vectors should be size 1 with unit infinity
  // norm (i.e., default weights of 1) after initialization.
  EXPECT_EQ(this->integrator->get_mutable_generalized_state_weight_vector().
      size(), 1);
  EXPECT_EQ(this->integrator->get_mutable_generalized_state_weight_vector().
      template lpNorm<Eigen::Infinity>(), 1);
  EXPECT_EQ(this->integrator->get_misc_state_weight_vector().size(), 1);
  EXPECT_EQ(this->integrator->get_mutable_misc_state_weight_vector().
      template lpNorm<Eigen::Infinity>(), 1);
}
// Tests the ability to setup the integrator robustly (i.e., with minimal
// user knowledge); in other words, if the user fails to set some aspect of the
// integrator properly, will NaN values make it run forever?
TYPED_TEST_P(ExplicitErrorControlledIntegratorTest, BulletProofSetup) {
  // Setup the initial position and initial velocity
  const double initial_position = 0.1;
  const double initial_velocity = 0.01;
  // Undamped natural frequency omega = sqrt(k/m).
  const double omega = std::sqrt(this->kSpringK / this->kMass);
  // Set the initial conditions.
  this->spring_mass->set_position(this->integrator->get_mutable_context(),
                                  initial_position);
  this->spring_mass->set_velocity(this->integrator->get_mutable_context(),
                                  initial_velocity);
  // Setup c1 and c2 for ODE constants.
  const double c1 = initial_position;
  const double c2 = initial_velocity / omega;
  // Attempt to initialize the integrator: should throw logic error because
  // neither maximum step size nor target accuracy has been set.
  EXPECT_THROW(this->integrator->Initialize(), std::logic_error);
  // Attempt to initialize the integrator: should throw logic error because
  // maximum step size smaller than minimum step size.
  this->integrator->set_maximum_step_size(this->kDt);
  this->integrator->set_requested_minimum_step_size(this->kBigDt);
  EXPECT_THROW(this->integrator->Initialize(), std::logic_error);
  // Set step sizes to cogent values and try to initialize again but now using
  // bad requested initial step sizes (first below the minimum, then above the
  // maximum).
  this->integrator->set_requested_minimum_step_size(1e-8);
  this->integrator->set_maximum_step_size(this->kBigDt);
  this->integrator->request_initial_step_size_target(1e-10);
  EXPECT_THROW(this->integrator->Initialize(), std::logic_error);
  this->integrator->request_initial_step_size_target(this->kBigDt*2.0);
  // Set the accuracy to something too loose, set the maximum step size and
  // try again. Integrator should now silently adjust the target accuracy to
  // the in-use accuracy.
  this->integrator->request_initial_step_size_target(this->kDt);
  this->integrator->set_target_accuracy(10.0);
  this->integrator->Initialize();
  EXPECT_LE(this->integrator->get_accuracy_in_use(),
            this->integrator->get_target_accuracy());
  // Integrate for 1 second using variable stepping.
  const double t_final = 1.0;
  do {
    this->integrator->IntegrateNoFurtherThanTime(t_final, t_final, t_final);
  } while (this->context->get_time() < t_final);
  // Get the final position.
  const double x_final =
      this->context->get_continuous_state().get_vector().GetAtIndex(0);
  // Check the solution. We're not really looking for accuracy here, just
  // want to make sure that the value is finite.
  EXPECT_NEAR(
      c1 * std::cos(omega * t_final) + c2 * std::sin(omega * t_final),
      x_final, 1e0);
}
// Tests the error estimation capabilities by comparing the accuracy of the
// error estimate for a full step of size h against two half-steps, and
// checking the improvement is consistent with the estimator's order.
TYPED_TEST_P(ExplicitErrorControlledIntegratorTest, ErrEstOrder) {
  // Setup the initial position and initial velocity.
  const double initial_position = 0.1;
  const double initial_velocity = 0.01;
  const double omega = std::sqrt(this->kSpringK / this->kMass);
  // Pick a step size that is much smaller than the period of vibration.
  const double period_of_vibration = 2.0 * M_PI / omega;
  const double h = period_of_vibration / 512.0;
  // Set initial conditions.
  this->spring_mass->set_position(this->integrator->get_mutable_context(),
                                  initial_position);
  this->spring_mass->set_velocity(this->integrator->get_mutable_context(),
                                  initial_velocity);
  // Setup c1 and c2 for ODE constants.
  const double c1 = initial_position;
  const double c2 = initial_velocity / omega;
  // Set integrator parameters: do no error control.
  this->integrator->set_maximum_step_size(h);
  this->integrator->set_fixed_step_mode(true);
  // Initialize the integrator.
  this->integrator->Initialize();
  // Take a single step of size h.
  ASSERT_EQ(this->context->get_time(), 0.0);
  const double t_final = this->context->get_time() + h;
  ASSERT_TRUE(this->integrator->IntegrateWithSingleFixedStepToTime(t_final));
  // Verify that a step of h was taken.
  EXPECT_NEAR(this->context->get_time(), h,
              std::numeric_limits<double>::epsilon());
  // Get the true solution (from the closed form).
  const double x_true = c1 * std::cos(omega * h) + c2 * std::sin(omega * h);
  // Get the integrator's solution.
  const double x_approx_h = this->context->get_continuous_state_vector().
      GetAtIndex(0);
  // Get the error estimate and the error in the error estimate.
  const double err_est_h =
      this->integrator->get_error_estimate()->get_vector().GetAtIndex(0);
  const double err_est_h_err = std::abs(err_est_h - (x_true - x_approx_h));
  // Compute the same solution using two half-steps, resetting time and
  // state first.
  this->context->SetTime(0);
  this->spring_mass->set_position(this->integrator->get_mutable_context(),
                                  initial_position);
  this->spring_mass->set_velocity(this->integrator->get_mutable_context(),
                                  initial_velocity);
  this->integrator->Initialize();
  ASSERT_TRUE(this->integrator->IntegrateWithSingleFixedStepToTime(
      t_final / 2.0));
  ASSERT_TRUE(this->integrator->IntegrateWithSingleFixedStepToTime(t_final));
  EXPECT_NEAR(this->context->get_time(), h,
              std::numeric_limits<double>::epsilon());
  const double x_approx_2h_h = this->context->get_continuous_state_vector().
      GetAtIndex(0);
  const double err_est_2h_h =
      this->integrator->get_error_estimate()->get_vector().GetAtIndex(0);
  const double err_est_2h_h_err = std::abs(err_est_2h_h -
      (x_true - x_approx_2h_h));
  // Verify that the error in the error estimate dropped in accordance with the
  // order of the error estimator. Theory indicates that asymptotic error in
  // the estimate is bound by K*h^order, where K is some constant and h is
  // sufficiently small. We assume a value for K of 4.0 below, and we check that
  // the improvement in the error estimate is not as good as K*h^(order+1).
  // The K and h might need to be redetermined for a different problem or
  // for untested error-controlled integrators.
  const double K = 4.0;
  const int err_est_order = this->integrator->get_error_estimate_order();
  EXPECT_LE(err_est_2h_h_err, K * err_est_h_err / std::pow(2.0, err_est_order));
  EXPECT_GE(K * err_est_2h_h_err,
            err_est_h_err / std::pow(2.0, err_est_order + 1));
}
// Integrate a purely continuous system with no sampling using error control.
// d^2x/h^2 = -kx/m
// solution to this ODE: x(t) = c1*cos(omega*t) + c2*sin(omega*t)
// where omega = sqrt(k/m)
// x'(t) = -c1*sin(omega*t)*omega + c2*cos(omega*t)*omega
// for t = 0, x(0) = c1, x'(0) = c2*omega
TYPED_TEST_P(ExplicitErrorControlledIntegratorTest, SpringMassStepEC) {
  // Set integrator parameters: do no error control.
  this->integrator->set_maximum_step_size(this->kDt);
  this->integrator->set_fixed_step_mode(true);
  // Initialize the integrator.
  this->integrator->Initialize();
  // Setup the initial position and initial velocity.
  const double initial_position = 0.1;
  const double initial_velocity = 0.01;
  const double omega = std::sqrt(this->kSpringK / this->kMass);
  // Set initial conditions.
  this->spring_mass->set_position(this->integrator->get_mutable_context(),
                                  initial_position);
  this->spring_mass->set_velocity(this->integrator->get_mutable_context(),
                                  initial_velocity);
  // Setup c1 and c2 for ODE constants.
  const double c1 = initial_position;
  const double c2 = initial_velocity / omega;
  // The closed-form solution at t_final, used to check both integrations.
  const double t_final = 1.0;
  const double x_expected =
      c1 * std::cos(omega * t_final) + c2 * std::sin(omega * t_final);
  // Step for 1 second.
  for (double t = this->kDt; t <= t_final; t += this->kDt)
    this->integrator->IntegrateNoFurtherThanTime(t, t, t);
  // At this point, the time will often be 0.999 plus some change. Step one last
  // time to take us to 1.0s. If the time happens to already be 1.0s, this
  // call will have no effect.
  this->integrator->IntegrateNoFurtherThanTime(t_final, t_final, t_final);
  // Get the final position from the fixed-step integration.
  const double x_final_fixed =
      this->context->get_continuous_state().get_vector().GetAtIndex(0);
  // Store the number of integration steps.
  const int fixed_steps = this->integrator->get_num_steps_taken();
  // Check the fixed-step solution.
  EXPECT_NEAR(x_expected, x_final_fixed, 1e-5);
  // Reset the integrator and set reasonable parameters for integration with
  // error control.
  this->integrator->Reset();
  this->integrator->set_maximum_step_size(0.1);
  this->integrator->set_requested_minimum_step_size(1e-6);
  this->integrator->set_target_accuracy(1e-3);
  // Re-initialize the integrator.
  this->integrator->Initialize();
  // Set initial conditions.
  this->integrator->get_mutable_context()->SetTime(0.);
  this->spring_mass->set_position(this->integrator->get_mutable_context(),
                                  initial_position);
  this->spring_mass->set_velocity(this->integrator->get_mutable_context(),
                                  initial_velocity);
  // Step for 1 second.
  do {
    this->integrator->IntegrateNoFurtherThanTime(t_final, t_final, t_final);
  } while (this->context->get_time() < t_final);
  // Check the error-controlled solution. BUG FIX: the test previously
  // re-checked the stale fixed-step result here, so the error-controlled
  // trajectory was never validated; re-read the state. The tolerance matches
  // the requested accuracy (1e-3), since the error-controlled run was not
  // asked for the fixed-step run's precision.
  const double x_final_ec =
      this->context->get_continuous_state().get_vector().GetAtIndex(0);
  EXPECT_NEAR(x_expected, x_final_ec, 1e-3);
  // Verify that integrator statistics are valid.
  EXPECT_GE(this->integrator->get_previous_integration_step_size(), 0.0);
  EXPECT_GE(this->integrator->get_largest_step_size_taken(), 0.0);
  EXPECT_GE(this->integrator->get_smallest_adapted_step_size_taken(), 0.0);
  EXPECT_GE(this->integrator->get_num_steps_taken(), 0);
  EXPECT_NE(this->integrator->get_error_estimate(), nullptr);
  EXPECT_GT(this->integrator->get_num_derivative_evaluations(), 0);
  // Verify that less computation was performed compared to the fixed step
  // integrator.
  EXPECT_LT(this->integrator->get_num_steps_taken(), fixed_steps);
}
// Verifies that the integrator does not alter the state when directed to step
// to the present time.
TYPED_TEST_P(ExplicitErrorControlledIntegratorTest, StepToCurrentTimeNoOp) {
  // Set integrator parameters: do error control.
  this->integrator->set_maximum_step_size(this->kDt);
  this->integrator->set_fixed_step_mode(false);
  // Initialize the integrator.
  this->integrator->Initialize();
  // Setup the initial position and initial velocity.
  const double initial_position = 0.1;
  const double initial_velocity = 0.01;
  // Set initial conditions.
  this->spring_mass->set_position(this->integrator->get_mutable_context(),
                                  initial_position);
  this->spring_mass->set_velocity(this->integrator->get_mutable_context(),
                                  initial_velocity);
  // Integrate to one second.
  const double t_final = 1.0;
  while (this->context->get_time() < t_final)
    this->integrator->IntegrateNoFurtherThanTime(t_final, t_final, t_final);
  ASSERT_EQ(this->context->get_time(), t_final);
  // Get the final state (snapshot taken by value for later comparison).
  const VectorX<double> x_final =
      this->context->get_continuous_state_vector().CopyToVector();
  // Call the various stepping methods, ensuring that the state and time do
  // not change. Bitwise equality (EXPECT_EQ) is intentional: a no-op step
  // must not perturb the state at all.
  const double inf = std::numeric_limits<double>::infinity();
  this->integrator->IntegrateWithMultipleStepsToTime(t_final);
  EXPECT_EQ(this->context->get_time(), t_final);
  for (int i = 0; i < x_final.size(); ++i)
    EXPECT_EQ(x_final[i], this->context->get_continuous_state_vector()[i]);
  this->integrator->IntegrateNoFurtherThanTime(inf, inf, t_final);
  EXPECT_EQ(this->context->get_time(), t_final);
  for (int i = 0; i < x_final.size(); ++i)
    EXPECT_EQ(x_final[i], this->context->get_continuous_state_vector()[i]);
  // Must do fixed stepping for the last test.
  this->integrator->set_fixed_step_mode(true);
  ASSERT_TRUE(this->integrator->IntegrateWithSingleFixedStepToTime(t_final));
  EXPECT_EQ(this->context->get_time(), t_final);
  for (int i = 0; i < x_final.size(); ++i)
    EXPECT_EQ(x_final[i], this->context->get_continuous_state_vector()[i]);
}
// Verifies that the maximum step size taken is smaller than the integrator
// max.
TYPED_TEST_P(ExplicitErrorControlledIntegratorTest, MaxStepSizeRespected) {
  // Set the initial position and initial velocity such that any step is
  // viable.
  const double initial_position = 0;
  const double initial_velocity = 0;
  this->spring_mass->set_position(this->integrator->get_mutable_context(),
                                  initial_position);
  this->spring_mass->set_velocity(this->integrator->get_mutable_context(),
                                  initial_velocity);
  // Set reasonable parameters for integration with error control.
  const double max_step_size = 1e-2;
  this->integrator->Reset();
  this->integrator->set_maximum_step_size(max_step_size);
  this->integrator->set_requested_minimum_step_size(1e-6);
  // Initialize the integrator.
  this->integrator->Initialize();
  // Step for 1/10 second.
  const double inf = std::numeric_limits<double>::infinity();
  const double eps = std::numeric_limits<double>::epsilon();
  const double t_final = 0.1;
  do {
    // NOTE: this perfect storm of conditions (error controlled integration
    // can take the maximum step size, publish time larger than update time,
    // update time larger than directed step, directed step larger than maximum
    // step size) causes IntegratorBase::StepOnceErrorControlledAtMost() to
    // to hang *if* that method does not account for the maximum step size.
    this->integrator->IntegrateNoFurtherThanTime(
        inf, this->context->get_time() + max_step_size + eps, t_final);
  } while (this->context->get_time() < t_final);
  // Verify the statistics. The stretch factor allows a small overshoot of the
  // nominal maximum step size.
  EXPECT_LE(this->integrator->get_largest_step_size_taken(),
            max_step_size * this->integrator->get_stretch_factor());
}
// Verify that attempting to take a single fixed step throws an exception.
TYPED_TEST_P(ExplicitErrorControlledIntegratorTest, IllegalFixedStep) {
  // Set integrator parameters: do error control.
  this->integrator->set_maximum_step_size(this->kDt);
  this->integrator->set_fixed_step_mode(false);
  // Set accuracy to a really small value so that the step is guaranteed to be
  // small.
  this->integrator->set_target_accuracy(1e-8);
  // Initialize the integrator.
  this->integrator->Initialize();
  ASSERT_EQ(this->context->get_time(), 0.0);
  // Single fixed steps are only legal in fixed-step mode.
  EXPECT_THROW(unused(
      this->integrator->IntegrateWithSingleFixedStepToTime(1e-8)),
               std::logic_error);
}
// Verifies statistics validity for error controlled integrator.
TYPED_TEST_P(ExplicitErrorControlledIntegratorTest, CheckStat) {
  // Configure error-controlled integration.
  this->integrator->set_maximum_step_size(this->kDt);
  this->integrator->set_fixed_step_mode(false);

  // A very tight accuracy target guarantees that the adapted step is small.
  this->integrator->set_target_accuracy(1e-8);

  // Prepare the integrator for use.
  this->integrator->Initialize();

  // Establish a non-trivial initial state.
  const double x0 = 0.1;
  const double v0 = 0.01;
  this->spring_mass->set_position(this->integrator->get_mutable_context(), x0);
  this->spring_mass->set_velocity(this->integrator->get_mutable_context(), v0);

  // Advance by just one integration step.
  const double step_end_time = this->context->get_time() + this->kDt;
  this->integrator->IntegrateNoFurtherThanTime(step_end_time, step_end_time,
                                               step_end_time);

  // The step actually taken must be non-negative and no larger than the
  // maximum; the smallest adapted step must respect the maximum as well.
  EXPECT_GE(this->integrator->get_previous_integration_step_size(), 0.0);
  EXPECT_LE(this->integrator->get_previous_integration_step_size(),
            this->kDt);
  EXPECT_LE(this->integrator->get_smallest_adapted_step_size_taken(),
            this->kDt);
}
// Verifies that dense integration works with error-controlled integration:
// the dense output must cover the full time interval with one polynomial
// segment per integration step.
TYPED_TEST_P(ExplicitErrorControlledIntegratorTest, DenseOutput) {
  this->integrator->set_target_accuracy(1e-8);

  // Set an initial time step target that is too large, so that we have step
  // size "shrinkages".
  this->integrator->request_initial_step_size_target(3.);
  this->integrator->set_maximum_step_size(10.);

  // Initialize the integrator and begin accumulating dense output.
  this->integrator->Initialize();
  this->integrator->StartDenseIntegration();

  // Set the initial position and initial velocity.
  const double initial_position = 0.1;
  const double initial_velocity = 0.01;
  this->spring_mass->set_position(this->integrator->get_mutable_context(),
                                  initial_position);
  this->spring_mass->set_velocity(this->integrator->get_mutable_context(),
                                  initial_velocity);

  // Integrate and confirm that we had to shrink the steps.
  // NOTE(review): this check is vacuous — the shrinkage count is always
  // non-negative, so EXPECT_GE(..., 0) can never fail. EXPECT_GT was
  // presumably intended; confirm against all instantiating integrators
  // before changing.
  this->integrator->IntegrateWithMultipleStepsToTime(1.);
  EXPECT_GE(this->integrator->get_num_step_shrinkages_from_error_control(), 0);

  const std::unique_ptr<trajectories::PiecewisePolynomial<double>>
      dense_output = this->integrator->StopDenseIntegration();

  // Check the dense output: it spans [0, 1] and has one segment per step.
  EXPECT_EQ(dense_output->start_time(), 0.0);
  EXPECT_EQ(dense_output->end_time(), 1.0);
  EXPECT_EQ(dense_output->get_number_of_segments(),
            this->integrator->get_num_steps_taken());
}
// Registers every test case in this typed suite so that each concrete
// integrator type under test instantiates all of them.
REGISTER_TYPED_TEST_SUITE_P(ExplicitErrorControlledIntegratorTest,
    ReqInitialStepTarget, ContextAccess, ErrorEstSupport, MagDisparity, Scaling,
    BulletProofSetup, ErrEstOrder, SpringMassStepEC, MaxStepSizeRespected,
    IllegalFixedStep, CheckStat, DenseOutput, StepToCurrentTimeNoOp);
// T is the integrator type (e.g., RungeKutta3Integrator<double>).
// Typed test fixture pairing the Pleides benchmark system with the
// integrator type under test.
template <class T>
struct PleidesTest : public ::testing::Test {
 public:
  PleidesTest() {
    // Create the Pleides system.
    pleides = std::make_unique<analysis::test::PleidesSystem>();
    context = pleides->CreateDefaultContext();

    // Create the integrator, bound to the system and its context.
    integrator = std::make_unique<T>(*pleides, context.get());
  }

  // The system under integration, its context, and the integrator — all
  // owned by the fixture for the duration of each test.
  std::unique_ptr<analysis::test::PleidesSystem> pleides;
  std::unique_ptr<Context<double>> context;
  std::unique_ptr<IntegratorBase<double>> integrator;
};

TYPED_TEST_SUITE_P(PleidesTest);
// Verifies that the Pleides system can be integrated accurately.
TYPED_TEST_P(PleidesTest, Pleides) {
  // Use variable-step (not fixed-step) integration with a tight accuracy
  // requirement on each step. Step-size cutting means a variable step can be
  // much smaller than the initial step size, which by default is 1/10 of the
  // maximum step size chosen below. Semi-tight accuracy lets this single test
  // serve a variety of error-controlled integrators.
  this->integrator->set_maximum_step_size(0.1);
  this->integrator->set_fixed_step_mode(false);
  const double requested_local_accuracy = 1e-7;
  this->integrator->set_target_accuracy(requested_local_accuracy);

  // kTolerance = 100 is a heuristic derived from simulation experiments and
  // based on the fact that all tests pass within a tolerance of 25. The
  // extra factor of 4 (2 bits) helps ensure the tests also pass on various
  // compilers and computer architectures.
  const double kTolerance = 100 * requested_local_accuracy;

  // Prepare the integrator.
  this->integrator->Initialize();

  // Integrate all the way to the designated end time.
  const double t_final = this->pleides->get_end_time();
  this->integrator->IntegrateWithMultipleStepsToTime(t_final);

  // Compare each generalized position against the reference solution.
  const auto& continuous_state = this->context->get_continuous_state();
  const VectorX<double> q =
      continuous_state.get_generalized_position().CopyToVector();
  const VectorX<double> q_des = analysis::test::PleidesSystem::GetSolution(
      this->context->get_time());
  for (int i = 0; i < q.size(); ++i) {
    EXPECT_NEAR(q[i], q_des[i], kTolerance) << i;
  }
}
// Registers the suite's single test so concrete integrator types can
// instantiate it.
REGISTER_TYPED_TEST_SUITE_P(PleidesTest, Pleides);
} // namespace analysis_test
} // namespace systems
} // namespace drake
| 0 |
/home/johnshepherd/drake/systems/analysis | /home/johnshepherd/drake/systems/analysis/test_utilities/stateless_system.h | #pragma once
#include <cmath>
#include <memory>
#include <utility>
#include <vector>
#include "drake/systems/framework/leaf_system.h"
#include "drake/systems/framework/witness_function.h"
namespace drake {
namespace systems {
namespace analysis_test {
/// System with no state for testing a simplistic witness function.
/// The witness fires when the context time crosses a fixed offset; an
/// optional user-supplied publish callback is invoked when it triggers.
template <class T>
class StatelessSystem final : public LeafSystem<T> {
 public:
  DRAKE_NO_COPY_NO_MOVE_NO_ASSIGN(StatelessSystem)

  /// Constructs the system so that its clock witness triggers when time
  /// reaches @p offset, using crossing direction @p dir_type.
  StatelessSystem(double offset, const WitnessFunctionDirection& dir_type)
      : LeafSystem<T>(SystemTypeTag<StatelessSystem>{}),
        offset_(offset) {
    witness_ = this->MakeWitnessFunction(
        "clock witness", dir_type, &StatelessSystem::CalcClockWitness,
        &StatelessSystem::InvokePublishCallback);
  }

  /// Scalar-converting copy constructor. See @ref system_scalar_conversion.
  /// @note This function does not preserve the publish callback because
  /// this is test code for which it is expected that no one will care
  /// whether the publish callback survives transmogrification.
  template <typename U>
  explicit StatelessSystem(const StatelessSystem<U>& other)
      : StatelessSystem<T>(other.get_trigger_time(),
                           other.witness_->direction_type()) {}

  /// Sets the callback to invoke when the witness triggers; a null callback
  /// (the default) disables publication side effects.
  void set_publish_callback(
      std::function<void(const Context<T>&)> callback) {
    publish_callback_ = callback;
  }

  /// Gets the time that the witness function triggered.
  double get_trigger_time() const { return offset_; }

 protected:
  // Exposes the single clock witness to the framework.
  void DoGetWitnessFunctions(
      const Context<T>&,
      std::vector<const WitnessFunction<T>*>* w) const override {
    w->push_back(witness_.get());
  }

 private:
  // Allow different specializations to access each other's private data.
  template <typename> friend class StatelessSystem;

  // The witness function is the time value itself less the offset value,
  // so it crosses zero exactly at t = offset_.
  T CalcClockWitness(const Context<T>& context) const {
    return context.get_time() - offset_;
  }

  // Forwards witness-triggered publish events to the user callback, if any.
  void InvokePublishCallback(const Context<T>& context,
      const PublishEvent<T>&) const {
    if (this->publish_callback_ != nullptr) this->publish_callback_(context);
  }

  const double offset_;
  std::unique_ptr<WitnessFunction<T>> witness_;
  std::function<void(const Context<T>&)> publish_callback_{nullptr};
};
} // namespace analysis_test
} // namespace systems
} // namespace drake
| 0 |
/home/johnshepherd/drake/systems/analysis/test_utilities | /home/johnshepherd/drake/systems/analysis/test_utilities/test/controlled_spring_mass_system_test.cc | #include "drake/systems/analysis/test_utilities/controlled_spring_mass_system.h"
#include "gtest/gtest.h"
#include <Eigen/Dense>
using std::make_unique;
namespace drake {
namespace systems {
namespace {
// Parameters of the spring-mass plant and its PID controller, shared by all
// tests below. SI units are noted per constant.
const double kSpring = 300.0;  // N/m
const double kMass = 2.0;      // kg
const double kProportionalConstant = 1.0;  // N/m
const double kDerivativeConstant = 1.0;    // N*s/m
const double kIntegralConstant = 1.0;      // N/(m*s)
const double kTargetPosition = 1.0;        // m
// A unit test fixture to evaluate the correct functioning of the
// PidControlledSpringMassSystem example.
class SpringMassSystemTest : public ::testing::Test {
 protected:
  void SetUp() override {
    // Builds the closed-loop model: a spring-mass plant regulated to
    // kTargetPosition by a PID controller with the gains defined above.
    model_ =
        make_unique<PidControlledSpringMassSystem<double>>(
            kSpring, kMass,
            kProportionalConstant, kIntegralConstant, kDerivativeConstant,
            kTargetPosition);
    model_context_ = model_->CreateDefaultContext();
    output_ = model_->AllocateOutput();

    // Gets the plant subcontext.
    plant_context_ =
        &model_->GetMutableSubsystemContext(
            model_->get_plant(), model_context_.get());
  }

  // The closed-loop model under test plus its context and output storage.
  std::unique_ptr<PidControlledSpringMassSystem<double>> model_;
  std::unique_ptr<Context<double>> model_context_;
  // Borrowed pointer into model_context_; valid for the fixture's lifetime.
  Context<double>* plant_context_;
  std::unique_ptr<SystemOutput<double>> output_;
};
// Tests the correct output from the model.
TEST_F(SpringMassSystemTest, EvalOutput) {
  const double initial_position = 2.0;
  const double initial_velocity = -1.0;

  // Start from a non-zero initial condition.
  model_->set_position(model_context_.get(), initial_position);
  model_->set_velocity(model_context_.get(), initial_velocity);

  ASSERT_EQ(1, output_->num_ports());
  model_->CalcOutput(*model_context_, output_.get());

  // The output equals the state of the spring-mass plant being controlled,
  // which consists of position, velocity and energy.
  const Eigen::Vector3d expected(initial_position, initial_velocity, 0.0);
  const BasicVector<double>* port_value = output_->get_vector_data(0);
  ASSERT_NE(nullptr, port_value);
  for (int i = 0; i < 3; ++i) {
    EXPECT_EQ(expected[i], port_value->get_value()[i]);
  }
}
// Tests the time derivatives computed for the closed-loop model.
TEST_F(SpringMassSystemTest, EvalTimeDerivatives) {
  const double x0 = 2.0;
  const double v0 = -1.5;

  // Sets a non-zero initial condition.
  model_->set_position(model_context_.get(), x0);
  model_->set_velocity(model_context_.get(), v0);

  std::unique_ptr<ContinuousState<double>> derivatives =
      model_->AllocateTimeDerivatives();
  model_->CalcTimeDerivatives(*model_context_, derivatives.get());

  // The spring-mass plant has a state vector of size 3. One position, one
  // velocity and one miscellaneous state (energy). Moreover, the model has an
  // additional miscellaneous state corresponding to the integral of the PID
  // controller. Therefore the size of the misc state vector is 2.
  ASSERT_EQ(4, derivatives->size());
  ASSERT_EQ(1, derivatives->get_generalized_position().size());
  ASSERT_EQ(1, derivatives->get_generalized_velocity().size());
  ASSERT_EQ(2, derivatives->get_misc_continuous_state().size());

  // The derivatives of plant.
  const ContinuousState<double>& plant_xcdot =
      model_->GetSubsystemDerivatives(model_->get_plant(), *derivatives);

  // Position derivative.
  EXPECT_EQ(v0, plant_xcdot.get_vector().GetAtIndex(0));

  // Acceleration. The expected PID actuation omits the integral term —
  // presumably the integral state starts at zero in the default context;
  // TODO(review): confirm.
  const double error = x0 - kTargetPosition;
  const double error_rate = v0;  // target velocity is zero.
  const double pid_actuation =
      kProportionalConstant * error + kDerivativeConstant * error_rate;
  EXPECT_EQ((-kSpring * x0 - pid_actuation) / kMass,
            plant_xcdot.get_vector().GetAtIndex(1));

  // Power.
  EXPECT_EQ(model_->get_plant().EvalConservativePower(*plant_context_),
            plant_xcdot.get_vector().GetAtIndex(2));
}
// Verifies that the closed-loop model reports no direct feedthrough from
// any input to any output.
TEST_F(SpringMassSystemTest, DirectFeedthrough) {
  EXPECT_FALSE(model_->HasAnyDirectFeedthrough());
}
} // namespace
} // namespace systems
} // namespace drake
| 0 |
Subsets and Splits