8_cythonno_x86_64_10_64 | test_system_auth_ks_is_alterable | Failure | cassandra.DriverException: Keyspace metadata was not refreshed. See log for details. | 46.245 |
self = <auth_test.TestAuth object at 0x7ff21a586a00>

    def test_system_auth_ks_is_alterable(self):
        """
        * Launch a three node cluster
        * Verify the default RF of system_auth is 1
        * Increase the system_auth RF to 3
        * Run repair, see CASSANDRA-10655
        * Restart the cluster
        * Check that each node agrees on the system_auth RF

        @jira_ticket CASSANDRA-10655
        """
        self.prepare(nodes=3)
        logger.debug("nodes started")
        session = self.get_session(user='cassandra', password='cassandra')
        auth_metadata = UpdatingKeyspaceMetadataWrapper(
            cluster=session.cluster,
            ks_name='system_auth',
            max_schema_agreement_wait=60  # 6x the default of 10
        )
        assert 1 == auth_metadata.replication_strategy.replication_factor

        session.execute("""
            ALTER KEYSPACE system_auth
            WITH replication = {'class':'SimpleStrategy', 'replication_factor':3};
        """)

>       assert 3 == auth_metadata.replication_strategy.replication_factor
auth_test.py:102:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tools/metadata_wrapper.py:10: in __getattr__
    return getattr(self._wrapped, name)
tools/metadata_wrapper.py:57: in _wrapped
    self._cluster.refresh_keyspace_metadata(
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <cassandra.cluster.Cluster object at 0x7ff21a018b80>
keyspace = 'system_auth', max_schema_agreement_wait = 60
    def refresh_keyspace_metadata(self, keyspace, max_schema_agreement_wait=None):
        """
        Synchronously refresh keyspace metadata. This applies to keyspace-level information such as replication
        and durability settings. It does not refresh tables, types, etc. contained in the keyspace.

        See :meth:`~.Cluster.refresh_schema_metadata` for a description of ``max_schema_agreement_wait`` behavior.
        """
        if not self.control_connection.refresh_schema(target_type=SchemaTargetType.KEYSPACE, keyspace=keyspace,
                                                      schema_agreement_wait=max_schema_agreement_wait, force=True):
>           raise DriverException("Keyspace metadata was not refreshed. See log for details.")
E           cassandra.DriverException: Keyspace metadata was not refreshed. See log for details.
../cassandra/build/venv/src/cassandra-driver/cassandra/cluster.py:2162: DriverException
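
The UpdatingKeyspaceMetadataWrapper seen in this traceback is a refresh-on-access proxy: every attribute lookup re-fetches the keyspace metadata before delegating, so each assertion reads the cluster's current schema view rather than a cached one. A minimal sketch consistent with the two frames shown (__getattr__ delegating to a _wrapped property that calls refresh_keyspace_metadata); anything beyond those frames, such as how the constructor stores its arguments, is an assumption:

    class UpdatingKeyspaceMetadataWrapper(object):
        def __init__(self, cluster, ks_name, max_schema_agreement_wait=None):
            self._cluster = cluster
            self._ks_name = ks_name
            self._max_schema_agreement_wait = max_schema_agreement_wait

        @property
        def _wrapped(self):
            # Raises DriverException -- the failure above -- when schema
            # agreement is not reached within the wait window.
            self._cluster.refresh_keyspace_metadata(
                self._ks_name,
                max_schema_agreement_wait=self._max_schema_agreement_wait)
            return self._cluster.metadata.keyspaces[self._ks_name]

        def __getattr__(self, name):
            # Only fires for attributes not defined on the wrapper itself,
            # e.g. replication_strategy; triggers a refresh via _wrapped.
            return getattr(self._wrapped, name)

With this shape, the failing line `assert 3 == auth_metadata.replication_strategy.replication_factor` forces a synchronous metadata refresh, which is where the DriverException surfaces when the three nodes do not reach schema agreement within the 60-second window.
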
8_cythonno_x86_64_31_64 | test_change_durable_writes | Failure | AssertionError: Commitlog was written with durable writes disabled (assert 163840 == 90112) | 93.042 |
self = <configuration_test.TestConfiguration object at 0x7fcab9efe4f0>

    @pytest.mark.timeout(60*30)
    def test_change_durable_writes(self):
        """
        @jira_ticket CASSANDRA-9560

        Test that changes to the DURABLE_WRITES option on keyspaces are
        respected in subsequent writes.

        This test starts by writing a dataset to a cluster and asserting that
        the commitlogs have been written to. The subsequent test depends on
        the assumption that this dataset triggers an fsync.

        After checking this assumption, the test destroys the cluster and
        creates a fresh one. Then it tests that DURABLE_WRITES is respected by:

        - creating a keyspace with DURABLE_WRITES set to false,
        - using ALTER KEYSPACE to set its DURABLE_WRITES option to true,
        - writing a dataset to this keyspace that is known to trigger a commitlog fsync,
        - asserting that the commitlog has grown in size since the data was written.
        """
        cluster = self.cluster
        cluster.set_batch_commitlog(enabled=True, use_batch_window=cluster.version() < '5.0')
        cluster.set_configuration_options(values={'commitlog_segment_size_in_mb': 1})
        cluster.populate(1).start()
        durable_node = cluster.nodelist()[0]

        durable_init_size = commitlog_size(durable_node)
        durable_session = self.patient_exclusive_cql_connection(durable_node)

        # test the assumption that write_to_trigger_fsync actually triggers a commitlog fsync
        durable_session.execute("CREATE KEYSPACE ks WITH REPLICATION = {'class': 'SimpleStrategy', 'replication_factor': 1} "
                                "AND DURABLE_WRITES = true")
        durable_session.execute('CREATE TABLE ks.tab (key int PRIMARY KEY, a int, b int, c int)')
        logger.debug('commitlog size diff = ' + str(commitlog_size(durable_node) - durable_init_size))
        write_to_trigger_fsync(durable_session, 'ks', 'tab')
        logger.debug('commitlog size diff = ' + str(commitlog_size(durable_node) - durable_init_size))

        assert commitlog_size(durable_node) > durable_init_size, \
            "This test will not work in this environment; write_to_trigger_fsync does not trigger fsync."

        durable_session.shutdown()
        cluster.stop()
        cluster.clear()

        cluster.set_batch_commitlog(enabled=True, use_batch_window=cluster.version() < '5.0')
        cluster.set_configuration_options(values={'commitlog_segment_size_in_mb': 1})
        cluster.start()
        node = cluster.nodelist()[0]
        session = self.patient_exclusive_cql_connection(node)

        # set up a keyspace without durable writes, then alter it to use them
        session.execute("CREATE KEYSPACE ks WITH REPLICATION = {'class': 'SimpleStrategy', 'replication_factor': 1} "
                        "AND DURABLE_WRITES = false")
        session.execute('CREATE TABLE ks.tab (key int PRIMARY KEY, a int, b int, c int)')
        init_size = commitlog_size(node)
        write_to_trigger_fsync(session, 'ks', 'tab')
>       assert commitlog_size(node) == init_size, "Commitlog was written with durable writes disabled"
E       AssertionError: Commitlog was written with durable writes disabled
E       assert 163840 == 90112
E         +163840
E         -90112
configuration_test.py:104: AssertionError
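
The commitlog_size(node) helper used above measures on-disk commitlog growth; the assertion fails because the commitlog grew by 73728 bytes (163840 - 90112) even though the keyspace was created with DURABLE_WRITES = false. A plausible sketch of such a helper, assuming a ccm-style node layout where segments live under <node path>/commitlogs (the directory name and the helper's exact definition in the dtest tree are assumptions):

    import os

    def commitlog_size(node):
        # Sum the sizes of all commitlog segment files for a ccm node.
        # node.get_path() is the node's root directory in a ccm cluster.
        commitlog_dir = os.path.join(node.get_path(), 'commitlogs')
        return sum(os.path.getsize(os.path.join(commitlog_dir, name))
                   for name in os.listdir(commitlog_dir))

Comparing two snapshots of this total before and after write_to_trigger_fsync is what lets the test distinguish "commitlog fsynced" from "writes skipped the commitlog entirely".
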