package org.apache.cassandra.db;
import java.io.File;
import java.io.IOError;
import java.io.IOException;
import java.net.InetAddress;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import java.util.stream.StreamSupport;
import javax.management.openmbean.OpenDataException;
import javax.management.openmbean.TabularData;
import java.util.concurrent.Future;
import com.google.common.collect.HashMultimap;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.SetMultimap;
import com.google.common.collect.Sets;
import com.google.common.io.ByteStreams;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.util.concurrent.Futures;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.config.SchemaConstants;
import org.apache.cassandra.cql3.QueryProcessor;
import org.apache.cassandra.cql3.UntypedResultSet;
import org.apache.cassandra.cql3.functions.*;
import org.apache.cassandra.db.commitlog.CommitLogPosition;
import org.apache.cassandra.db.compaction.CompactionHistoryTabularData;
import org.apache.cassandra.db.marshal.*;
import org.apache.cassandra.db.rows.Rows;
import org.apache.cassandra.db.partitions.PartitionUpdate;
import org.apache.cassandra.dht.*;
import org.apache.cassandra.exceptions.ConfigurationException;
import org.apache.cassandra.io.sstable.Descriptor;
import org.apache.cassandra.io.util.*;
import org.apache.cassandra.locator.IEndpointSnitch;
import org.apache.cassandra.metrics.RestorableMeter;
import org.apache.cassandra.net.MessagingService;
import org.apache.cassandra.schema.*;
import org.apache.cassandra.service.StorageService;
import org.apache.cassandra.service.paxos.Commit;
import org.apache.cassandra.service.paxos.PaxosState;
import org.apache.cassandra.thrift.cassandraConstants;
import org.apache.cassandra.transport.ProtocolVersion;
import org.apache.cassandra.utils.*;
import static java.util.Collections.emptyMap;
import static java.util.Collections.singletonMap;
import static org.apache.cassandra.cql3.QueryProcessor.executeInternal;
import static org.apache.cassandra.cql3.QueryProcessor.executeOnceInternal;
import static org.apache.cassandra.io.util.FileUtils.visitDirectory;
public final class SystemKeyspace
{
// Static utility holder: never instantiated.
private SystemKeyspace()
{
}
private static final Logger logger = LoggerFactory.getLogger(SystemKeyspace.class);
// Sentinel version recorded for a peer whose release_version string could not be parsed.
public static final CassandraVersion UNREADABLE_VERSION = new CassandraVersion("0.0.0-unknown");
// Sentinel version recorded for a peer that reported no release_version at all
// (see loadPeerVersions below).
public static final CassandraVersion NULL_VERSION = new CassandraVersion("0.0.0-absent");
// Names of the tables that make up the local "system" keyspace.
public static final String BATCHES = "batches";
public static final String PAXOS = "paxos";
public static final String BUILT_INDEXES = "IndexInfo";
public static final String LOCAL = "local";
public static final String PEERS = "peers";
public static final String PEER_EVENTS = "peer_events";
public static final String RANGE_XFERS = "range_xfers";
public static final String COMPACTION_HISTORY = "compaction_history";
public static final String SSTABLE_ACTIVITY = "sstable_activity";
public static final String SIZE_ESTIMATES = "size_estimates";
public static final String AVAILABLE_RANGES = "available_ranges";
public static final String TRANSFERRED_RANGES = "transferred_ranges";
public static final String VIEWS_BUILDS_IN_PROGRESS = "views_builds_in_progress";
public static final String BUILT_VIEWS = "built_views";
public static final String PREPARED_STATEMENTS = "prepared_statements";
// Pre-3.0 tables, retained only so old data can still be read (e.g. during upgrade).
@Deprecated public static final String LEGACY_HINTS = "hints";
@Deprecated public static final String LEGACY_BATCHLOG = "batchlog";
@Deprecated public static final String LEGACY_KEYSPACES = "schema_keyspaces";
@Deprecated public static final String LEGACY_COLUMNFAMILIES = "schema_columnfamilies";
@Deprecated public static final String LEGACY_COLUMNS = "schema_columns";
@Deprecated public static final String LEGACY_TRIGGERS = "schema_triggers";
@Deprecated public static final String LEGACY_USERTYPES = "schema_usertypes";
@Deprecated public static final String LEGACY_FUNCTIONS = "schema_functions";
@Deprecated public static final String LEGACY_AGGREGATES = "schema_aggregates";
// ---------------------------------------------------------------------------
// CFMetaData definitions for the current-generation system tables. Each is
// built by compile() below from a CQL CREATE TABLE template.
// ---------------------------------------------------------------------------
// Batchlog entries; keyed locally by the batch timeuuid (LocalPartitioner),
// aggressively compacted (min_threshold=2) and with gc_grace 0 since rows are
// short-lived.
public static final CFMetaData Batches =
compile(BATCHES,
"batches awaiting replay",
"CREATE TABLE %s ("
+ "id timeuuid,"
+ "mutations list<blob>,"
+ "version int,"
+ "PRIMARY KEY ((id)))")
.copy(new LocalPartitioner(TimeUUIDType.instance))
.compaction(CompactionParams.scts(singletonMap("min_threshold", "2")))
.gcGraceSeconds(0);
// Paxos state per (partition key, table); leveled compaction.
private static final CFMetaData Paxos =
compile(PAXOS,
"in-progress paxos proposals",
"CREATE TABLE %s ("
+ "row_key blob,"
+ "cf_id UUID,"
+ "in_progress_ballot timeuuid,"
+ "most_recent_commit blob,"
+ "most_recent_commit_at timeuuid,"
+ "most_recent_commit_version int,"
+ "proposal blob,"
+ "proposal_ballot timeuuid,"
+ "proposal_version int,"
+ "PRIMARY KEY ((row_key), cf_id))")
.compaction(CompactionParams.lcs(emptyMap()));
// Note: table name is mixed-case ("IndexInfo"), hence the quoted "%s".
private static final CFMetaData BuiltIndexes =
compile(BUILT_INDEXES,
"built column indexes",
"CREATE TABLE \"%s\" ("
+ "table_name text,"
+ "index_name text,"
+ "PRIMARY KEY ((table_name), index_name)) "
+ "WITH COMPACT STORAGE");
// Single-partition table (key = 'local') holding this node's own metadata.
private static final CFMetaData Local =
compile(LOCAL,
"information about the local node",
"CREATE TABLE %s ("
+ "key text,"
+ "bootstrapped text,"
+ "broadcast_address inet,"
+ "cluster_name text,"
+ "cql_version text,"
+ "data_center text,"
+ "gossip_generation int,"
+ "host_id uuid,"
+ "listen_address inet,"
+ "native_protocol_version text,"
+ "partitioner text,"
+ "rack text,"
+ "release_version text,"
+ "rpc_address inet,"
+ "schema_version uuid,"
+ "thrift_version text,"
+ "tokens set<varchar>,"
+ "truncated_at map<uuid, blob>,"
+ "PRIMARY KEY ((key)))");
// One partition per known remote peer.
private static final CFMetaData Peers =
compile(PEERS,
"information about known peers in the cluster",
"CREATE TABLE %s ("
+ "peer inet,"
+ "data_center text,"
+ "host_id uuid,"
+ "preferred_ip inet,"
+ "rack text,"
+ "release_version text,"
+ "rpc_address inet,"
+ "schema_version uuid,"
+ "tokens set<varchar>,"
+ "PRIMARY KEY ((peer)))");
private static final CFMetaData PeerEvents =
compile(PEER_EVENTS,
"events related to peers",
"CREATE TABLE %s ("
+ "peer inet,"
+ "hints_dropped map<uuid, int>,"
+ "PRIMARY KEY ((peer)))");
private static final CFMetaData RangeXfers =
compile(RANGE_XFERS,
"ranges requested for transfer",
"CREATE TABLE %s ("
+ "token_bytes blob,"
+ "requested_at timestamp,"
+ "PRIMARY KEY ((token_bytes)))");
// Rows expire after 7 days via the table-level default TTL.
private static final CFMetaData CompactionHistory =
compile(COMPACTION_HISTORY,
"week-long compaction history",
"CREATE TABLE %s ("
+ "id uuid,"
+ "bytes_in bigint,"
+ "bytes_out bigint,"
+ "columnfamily_name text,"
+ "compacted_at timestamp,"
+ "keyspace_name text,"
+ "rows_merged map<int, bigint>,"
+ "PRIMARY KEY ((id)))")
.defaultTimeToLive((int) TimeUnit.DAYS.toSeconds(7));
private static final CFMetaData SSTableActivity =
compile(SSTABLE_ACTIVITY,
"historic sstable read rates",
"CREATE TABLE %s ("
+ "keyspace_name text,"
+ "columnfamily_name text,"
+ "generation int,"
+ "rate_120m double,"
+ "rate_15m double,"
+ "PRIMARY KEY ((keyspace_name, columnfamily_name, generation)))");
// gc_grace 0: estimates are purely local and constantly rewritten.
private static final CFMetaData SizeEstimates =
compile(SIZE_ESTIMATES,
"per-table primary range size estimates",
"CREATE TABLE %s ("
+ "keyspace_name text,"
+ "table_name text,"
+ "range_start text,"
+ "range_end text,"
+ "mean_partition_size bigint,"
+ "partitions_count bigint,"
+ "PRIMARY KEY ((keyspace_name), table_name, range_start, range_end))")
.gcGraceSeconds(0);
private static final CFMetaData AvailableRanges =
compile(AVAILABLE_RANGES,
"available keyspace/ranges during bootstrap/replace that are ready to be served",
"CREATE TABLE %s ("
+ "keyspace_name text,"
+ "ranges set<blob>,"
+ "PRIMARY KEY ((keyspace_name)))");
private static final CFMetaData TransferredRanges =
compile(TRANSFERRED_RANGES,
"record of transferred ranges for streaming operation",
"CREATE TABLE %s ("
+ "operation text,"
+ "peer inet,"
+ "keyspace_name text,"
+ "ranges set<blob>,"
+ "PRIMARY KEY ((operation, keyspace_name), peer))");
private static final CFMetaData ViewsBuildsInProgress =
compile(VIEWS_BUILDS_IN_PROGRESS,
"views builds current progress",
"CREATE TABLE %s ("
+ "keyspace_name text,"
+ "view_name text,"
+ "last_token varchar,"
+ "generation_number int,"
+ "PRIMARY KEY ((keyspace_name), view_name))");
private static final CFMetaData BuiltViews =
compile(BUILT_VIEWS,
"built views",
"CREATE TABLE %s ("
+ "keyspace_name text,"
+ "view_name text,"
+ "status_replicated boolean,"
+ "PRIMARY KEY ((keyspace_name), view_name))");
private static final CFMetaData PreparedStatements =
compile(PREPARED_STATEMENTS,
"prepared statements",
"CREATE TABLE %s ("
+ "prepared_id blob,"
+ "logged_keyspace text,"
+ "query_string text,"
+ "PRIMARY KEY ((prepared_id)))");
// ---------------------------------------------------------------------------
// Pre-3.0 ("legacy") system tables. Kept only so their on-disk data remains
// readable; new writes go to the replacement tables above / in SchemaKeyspace.
// ---------------------------------------------------------------------------
// Compaction disabled: contents are drained/migrated, never actively managed.
@Deprecated
public static final CFMetaData LegacyHints =
compile(LEGACY_HINTS,
"*DEPRECATED* hints awaiting delivery",
"CREATE TABLE %s ("
+ "target_id uuid,"
+ "hint_id timeuuid,"
+ "message_version int,"
+ "mutation blob,"
+ "PRIMARY KEY ((target_id), hint_id, message_version)) "
+ "WITH COMPACT STORAGE")
.compaction(CompactionParams.scts(singletonMap("enabled", "false")))
.gcGraceSeconds(0);
@Deprecated
public static final CFMetaData LegacyBatchlog =
compile(LEGACY_BATCHLOG,
"*DEPRECATED* batchlog entries",
"CREATE TABLE %s ("
+ "id uuid,"
+ "data blob,"
+ "version int,"
+ "written_at timestamp,"
+ "PRIMARY KEY ((id)))")
.compaction(CompactionParams.scts(singletonMap("min_threshold", "2")))
.gcGraceSeconds(0);
@Deprecated
public static final CFMetaData LegacyKeyspaces =
compile(LEGACY_KEYSPACES,
"*DEPRECATED* keyspace definitions",
"CREATE TABLE %s ("
+ "keyspace_name text,"
+ "durable_writes boolean,"
+ "strategy_class text,"
+ "strategy_options text,"
+ "PRIMARY KEY ((keyspace_name))) "
+ "WITH COMPACT STORAGE");
@Deprecated
public static final CFMetaData LegacyColumnfamilies =
compile(LEGACY_COLUMNFAMILIES,
"*DEPRECATED* table definitions",
"CREATE TABLE %s ("
+ "keyspace_name text,"
+ "columnfamily_name text,"
+ "bloom_filter_fp_chance double,"
+ "caching text,"
+ "cf_id uuid,"
+ "comment text,"
+ "compaction_strategy_class text,"
+ "compaction_strategy_options text,"
+ "comparator text,"
+ "compression_parameters text,"
+ "default_time_to_live int,"
+ "default_validator text,"
+ "dropped_columns map<text, bigint>,"
+ "gc_grace_seconds int,"
+ "is_dense boolean,"
+ "key_validator text,"
+ "local_read_repair_chance double,"
+ "max_compaction_threshold int,"
+ "max_index_interval int,"
+ "memtable_flush_period_in_ms int,"
+ "min_compaction_threshold int,"
+ "min_index_interval int,"
+ "read_repair_chance double,"
+ "speculative_retry text,"
+ "subcomparator text,"
+ "type text,"
+ "PRIMARY KEY ((keyspace_name), columnfamily_name))");
@Deprecated
public static final CFMetaData LegacyColumns =
compile(LEGACY_COLUMNS,
"*DEPRECATED* column definitions",
"CREATE TABLE %s ("
+ "keyspace_name text,"
+ "columnfamily_name text,"
+ "column_name text,"
+ "component_index int,"
+ "index_name text,"
+ "index_options text,"
+ "index_type text,"
+ "type text,"
+ "validator text,"
+ "PRIMARY KEY ((keyspace_name), columnfamily_name, column_name))");
@Deprecated
public static final CFMetaData LegacyTriggers =
compile(LEGACY_TRIGGERS,
"*DEPRECATED* trigger definitions",
"CREATE TABLE %s ("
+ "keyspace_name text,"
+ "columnfamily_name text,"
+ "trigger_name text,"
+ "trigger_options map<text, text>,"
+ "PRIMARY KEY ((keyspace_name), columnfamily_name, trigger_name))");
@Deprecated
public static final CFMetaData LegacyUsertypes =
compile(LEGACY_USERTYPES,
"*DEPRECATED* user defined type definitions",
"CREATE TABLE %s ("
+ "keyspace_name text,"
+ "type_name text,"
+ "field_names list<text>,"
+ "field_types list<text>,"
+ "PRIMARY KEY ((keyspace_name), type_name))");
@Deprecated
public static final CFMetaData LegacyFunctions =
compile(LEGACY_FUNCTIONS,
"*DEPRECATED* user defined function definitions",
"CREATE TABLE %s ("
+ "keyspace_name text,"
+ "function_name text,"
+ "signature frozen<list<text>>,"
+ "argument_names list<text>,"
+ "argument_types list<text>,"
+ "body text,"
+ "language text,"
+ "return_type text,"
+ "called_on_null_input boolean,"
+ "PRIMARY KEY ((keyspace_name), function_name, signature))");
@Deprecated
public static final CFMetaData LegacyAggregates =
compile(LEGACY_AGGREGATES,
"*DEPRECATED* user defined aggregate definitions",
"CREATE TABLE %s ("
+ "keyspace_name text,"
+ "aggregate_name text,"
+ "signature frozen<list<text>>,"
+ "argument_types list<text>,"
+ "final_func text,"
+ "initcond blob,"
+ "return_type text,"
+ "state_func text,"
+ "state_type text,"
+ "PRIMARY KEY ((keyspace_name), aggregate_name, signature))");
/**
 * Builds table metadata from a CQL CREATE TABLE template: the table name is
 * substituted into the template, the result is compiled into the system
 * keyspace, and the description is attached as the table comment.
 */
private static CFMetaData compile(String name, String description, String schema)
{
    String createStatement = String.format(schema, name);
    CFMetaData table = CFMetaData.compile(createStatement, SchemaConstants.SYSTEM_KEYSPACE_NAME);
    return table.comment(description);
}
// Assembles the full metadata for the local "system" keyspace: local (non-replicated)
// params, the tables above, no views/types, and the built-in native functions.
public static KeyspaceMetadata metadata()
{
return KeyspaceMetadata.create(SchemaConstants.SYSTEM_KEYSPACE_NAME, KeyspaceParams.local(), tables(), Views.none(), Types.none(), functions());
}
// All tables of the system keyspace, current and legacy, in one collection.
private static Tables tables()
{
return Tables.of(BuiltIndexes,
Batches,
Paxos,
Local,
Peers,
PeerEvents,
RangeXfers,
CompactionHistory,
SSTableActivity,
SizeEstimates,
AvailableRanges,
TransferredRanges,
ViewsBuildsInProgress,
BuiltViews,
LegacyHints,
LegacyBatchlog,
PreparedStatements,
LegacyKeyspaces,
LegacyColumnfamilies,
LegacyColumns,
LegacyTriggers,
LegacyUsertypes,
LegacyFunctions,
LegacyAggregates);
}
// The built-in (native) CQL functions registered with the system keyspace:
// uuid/time helpers, blob conversions, aggregates, and casts.
private static Functions functions()
{
return Functions.builder()
.add(UuidFcts.all())
.add(TimeFcts.all())
.add(BytesConversionFcts.all())
.add(AggregateFcts.all())
.add(CastFcts.all())
.build();
}
// Lazily-loaded cache of per-table truncation records (cfId -> (commit log
// position, truncated-at millis)). Set to null to invalidate after any write;
// repopulated on demand by getTruncationRecord().
private static volatile Map<UUID, Pair<CommitLogPosition, Long>> truncationRecords;
// Persisted bootstrap progress of this node (stored as text in local.bootstrapped).
public enum BootstrapState
{
NEEDS_BOOTSTRAP,
COMPLETED,
IN_PROGRESS,
DECOMMISSIONED
}
// Called once startup is complete; persists the system keyspaces' schema.
public static void finishStartup()
{
SchemaKeyspace.saveSystemKeyspacesSchema();
}
// Writes this node's static metadata (versions, addresses, DC/rack, partitioner)
// into the single 'local' partition. The bind-value order below must match the
// column list in the INSERT exactly.
public static void persistLocalMetadata()
{
String req = "INSERT INTO system.%s (" +
"key," +
"cluster_name," +
"release_version," +
"cql_version," +
"thrift_version," +
"native_protocol_version," +
"data_center," +
"rack," +
"partitioner," +
"rpc_address," +
"broadcast_address," +
"listen_address" +
") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)";
IEndpointSnitch snitch = DatabaseDescriptor.getEndpointSnitch();
executeOnceInternal(String.format(req, LOCAL),
LOCAL,
DatabaseDescriptor.getClusterName(),
FBUtilities.getReleaseVersionString(),
QueryProcessor.CQL_VERSION.toString(),
cassandraConstants.VERSION,
String.valueOf(ProtocolVersion.CURRENT.asInt()),
snitch.getDatacenter(FBUtilities.getBroadcastAddress()),
snitch.getRack(FBUtilities.getBroadcastAddress()),
DatabaseDescriptor.getPartitioner().getClass().getName(),
DatabaseDescriptor.getRpcAddress(),
FBUtilities.getBroadcastAddress(),
FBUtilities.getLocalAddress());
}
/**
 * Records one finished compaction in system.compaction_history.
 *
 * @param ksname      keyspace of the compacted table
 * @param cfname      name of the compacted table
 * @param compactedAt completion time, millis since epoch
 * @param bytesIn     total input bytes
 * @param bytesOut    total output bytes
 * @param rowsMerged  histogram of how many input rows were merged per output row
 */
public static void updateCompactionHistory(String ksname,
                                           String cfname,
                                           long compactedAt,
                                           long bytesIn,
                                           long bytesOut,
                                           Map<Integer, Long> rowsMerged)
{
    // Skip the history table itself, otherwise every compaction of
    // compaction_history would insert a new row into it.
    // (Was a hard-coded "system" literal; use the shared constant for
    // consistency with the rest of this class.)
    if (ksname.equals(SchemaConstants.SYSTEM_KEYSPACE_NAME) && cfname.equals(COMPACTION_HISTORY))
        return;
    String req = "INSERT INTO system.%s (id, keyspace_name, columnfamily_name, compacted_at, bytes_in, bytes_out, rows_merged) VALUES (?, ?, ?, ?, ?, ?, ?)";
    executeInternal(String.format(req, COMPACTION_HISTORY),
                    UUIDGen.getTimeUUID(),
                    ksname,
                    cfname,
                    // timestamp column bound from the raw long's bytes
                    ByteBufferUtil.bytes(compactedAt),
                    bytesIn,
                    bytesOut,
                    rowsMerged);
}
/**
 * Returns the entire compaction_history table in JMX tabular form.
 *
 * @throws OpenDataException if the tabular conversion fails
 */
public static TabularData getCompactionHistory() throws OpenDataException
{
    String query = String.format("SELECT * from system.%s", COMPACTION_HISTORY);
    return CompactionHistoryTabularData.from(executeInternal(query));
}
/**
 * A view counts as built iff a row for it exists in system.built_views.
 */
public static boolean isViewBuilt(String keyspaceName, String viewName)
{
    String query = String.format("SELECT view_name FROM %s.\"%s\" WHERE keyspace_name=? AND view_name=?",
                                 SchemaConstants.SYSTEM_KEYSPACE_NAME, BUILT_VIEWS);
    return !executeInternal(query, keyspaceName, viewName).isEmpty();
}
/**
 * Whether the "built" status of the given view has been replicated to the
 * base replicas; false when the view is unknown or the flag is unset.
 */
public static boolean isViewStatusReplicated(String keyspaceName, String viewName)
{
    String query = String.format("SELECT status_replicated FROM %s.\"%s\" WHERE keyspace_name=? AND view_name=?",
                                 SchemaConstants.SYSTEM_KEYSPACE_NAME, BUILT_VIEWS);
    UntypedResultSet rows = executeInternal(query, keyspaceName, viewName);
    if (rows.isEmpty())
        return false;
    UntypedResultSet.Row viewRow = rows.one();
    return viewRow.has("status_replicated") && viewRow.getBoolean("status_replicated");
}
/**
 * Marks a view as built (with the given replication flag) and flushes
 * built_views so the marker survives a restart.
 */
public static void setViewBuilt(String keyspaceName, String viewName, boolean replicated)
{
    String insert = String.format("INSERT INTO %s.\"%s\" (keyspace_name, view_name, status_replicated) VALUES (?, ?, ?)",
                                  SchemaConstants.SYSTEM_KEYSPACE_NAME, BUILT_VIEWS);
    executeInternal(insert, keyspaceName, viewName, replicated);
    forceBlockingFlush(BUILT_VIEWS);
}
/**
 * Removes all local state for a dropped view: its build-progress row and its
 * built marker, flushing both tables.
 */
public static void setViewRemoved(String keyspaceName, String viewName)
{
    // Fixed: the template used "%S" (Formatter's upper-casing conversion),
    // which rendered the keyspace as "SYSTEM" in the generated CQL. It only
    // worked because unquoted CQL identifiers are case-insensitive.
    String buildReq = "DELETE FROM %s.%s WHERE keyspace_name = ? AND view_name = ?";
    executeInternal(String.format(buildReq, SchemaConstants.SYSTEM_KEYSPACE_NAME, VIEWS_BUILDS_IN_PROGRESS), keyspaceName, viewName);
    forceBlockingFlush(VIEWS_BUILDS_IN_PROGRESS);
    String builtReq = "DELETE FROM %s.\"%s\" WHERE keyspace_name = ? AND view_name = ?";
    executeInternal(String.format(builtReq, SchemaConstants.SYSTEM_KEYSPACE_NAME, BUILT_VIEWS), keyspaceName, viewName);
    forceBlockingFlush(BUILT_VIEWS);
}
/**
 * Registers the start of a view build by recording its generation number in
 * views_builds_in_progress.
 */
public static void beginViewBuild(String ksname, String viewName, int generationNumber)
{
    String insert = String.format("INSERT INTO system.%s (keyspace_name, view_name, generation_number) VALUES (?, ?, ?)",
                                  VIEWS_BUILDS_IN_PROGRESS);
    executeInternal(insert, ksname, viewName, generationNumber);
}
// Completes a view build: records the view as built (not yet replicated) first,
// and only then deletes the in-progress row. NOTE(review): the ordering appears
// deliberate — marking built before clearing progress avoids a window where a
// crash would leave neither record; confirm against callers before reordering.
public static void finishViewBuildStatus(String ksname, String viewName)
{
setViewBuilt(ksname, viewName, false);
executeInternal(String.format("DELETE FROM system.%s WHERE keyspace_name = ? AND view_name = ?", VIEWS_BUILDS_IN_PROGRESS), ksname, viewName);
forceBlockingFlush(VIEWS_BUILDS_IN_PROGRESS);
}
// Upgrades a view's built marker to status_replicated = true.
public static void setViewBuiltReplicated(String ksname, String viewName)
{
setViewBuilt(ksname, viewName, true);
}
/**
 * Checkpoints view-build progress by storing the last processed token
 * (serialized via the table's token factory).
 */
public static void updateViewBuildStatus(String ksname, String viewName, Token token)
{
    Token.TokenFactory factory = ViewsBuildsInProgress.partitioner.getTokenFactory();
    String insert = String.format("INSERT INTO system.%s (keyspace_name, view_name, last_token) VALUES (?, ?, ?)",
                                  VIEWS_BUILDS_IN_PROGRESS);
    executeInternal(insert, ksname, viewName, factory.toString(token));
}
/**
 * Returns the recorded build progress for a view as (generation number,
 * last processed token), or null if no build is in progress. Either pair
 * component may be null if the corresponding column is unset.
 */
public static Pair<Integer, Token> getViewBuildStatus(String ksname, String viewName)
{
    String req = "SELECT generation_number, last_token FROM system.%s WHERE keyspace_name = ? AND view_name = ?";
    UntypedResultSet queryResultSet = executeInternal(String.format(req, VIEWS_BUILDS_IN_PROGRESS), ksname, viewName);
    if (queryResultSet == null || queryResultSet.isEmpty())
        return null;
    UntypedResultSet.Row row = queryResultSet.one();
    Integer generation = null;
    Token lastToken = null;
    if (row.has("generation_number"))
        generation = row.getInt("generation_number");
    // Fixed: the column selected above (and defined in the table schema) is
    // "last_token"; the previous code probed a non-existent "last_key" column,
    // so the saved checkpoint was never read back.
    if (row.has("last_token"))
    {
        Token.TokenFactory factory = ViewsBuildsInProgress.partitioner.getTokenFactory();
        lastToken = factory.fromString(row.getString("last_token"));
    }
    return Pair.create(generation, lastToken);
}
// Persists a truncation record (commit log position + truncated-at time) for the
// given table into local.truncated_at, invalidates the in-memory cache, and
// flushes so the record survives a crash.
public static synchronized void saveTruncationRecord(ColumnFamilyStore cfs, long truncatedAt, CommitLogPosition position)
{
String req = "UPDATE system.%s SET truncated_at = truncated_at + ? WHERE key = '%s'";
executeInternal(String.format(req, LOCAL, LOCAL), truncationAsMapEntry(cfs, truncatedAt, position));
truncationRecords = null;
forceBlockingFlush(LOCAL);
}
/**
 * Deletes the truncation record for the given table id, drops the in-memory
 * cache, and flushes the local table.
 */
public static synchronized void removeTruncationRecord(UUID cfId)
{
    String delete = String.format("DELETE truncated_at[?] from system.%s WHERE key = '%s'", LOCAL, LOCAL);
    executeInternal(delete, cfId);
    truncationRecords = null; // force a reload on next read
    forceBlockingFlush(LOCAL);
}
// Serializes one truncation record as a single-entry map (cfId -> blob).
// The blob layout is: serialized CommitLogPosition followed by the truncated-at
// long — truncationRecordFromBlob() must read it back in the same order.
private static Map<UUID, ByteBuffer> truncationAsMapEntry(ColumnFamilyStore cfs, long truncatedAt, CommitLogPosition position)
{
try (DataOutputBuffer out = DataOutputBuffer.scratchBuffer.get())
{
CommitLogPosition.serializer.serialize(position, out);
out.writeLong(truncatedAt);
return singletonMap(cfs.metadata.cfId, out.asNewBuffer());
}
catch (IOException e)
{
// Serialization to an in-memory buffer should not fail; surface as unchecked.
throw new RuntimeException(e);
}
}
/**
 * Commit log position recorded at the table's last truncation, or null if the
 * table was never truncated.
 */
public static CommitLogPosition getTruncatedPosition(UUID cfId)
{
    Pair<CommitLogPosition, Long> entry = getTruncationRecord(cfId);
    if (entry == null)
        return null;
    return entry.left;
}
/**
 * Millis timestamp of the table's last truncation, or Long.MIN_VALUE if the
 * table was never truncated.
 */
public static long getTruncatedAt(UUID cfId)
{
    Pair<CommitLogPosition, Long> entry = getTruncationRecord(cfId);
    if (entry == null)
        return Long.MIN_VALUE;
    return entry.right;
}
// Lazily loads the truncation-record cache on first access (synchronized with the
// writers above, which null it out to invalidate) and returns the entry for cfId.
private static synchronized Pair<CommitLogPosition, Long> getTruncationRecord(UUID cfId)
{
if (truncationRecords == null)
truncationRecords = readTruncationRecords();
return truncationRecords.get(cfId);
}
/**
 * Reads local.truncated_at and decodes every entry into an in-memory map of
 * cfId -> (commit log position, truncated-at millis). Empty map when nothing
 * has been recorded.
 */
private static Map<UUID, Pair<CommitLogPosition, Long>> readTruncationRecords()
{
    Map<UUID, Pair<CommitLogPosition, Long>> decoded = new HashMap<>();
    UntypedResultSet rows = executeInternal(String.format("SELECT truncated_at FROM system.%s WHERE key = '%s'", LOCAL, LOCAL));
    if (rows.isEmpty() || !rows.one().has("truncated_at"))
        return decoded;
    Map<UUID, ByteBuffer> stored = rows.one().getMap("truncated_at", UUIDType.instance, BytesType.instance);
    stored.forEach((cfId, blob) -> decoded.put(cfId, truncationRecordFromBlob(blob)));
    return decoded;
}
// Decodes a truncation blob written by truncationAsMapEntry(): a CommitLogPosition
// followed (in newer records) by the truncated-at long. Older records may lack the
// trailing long, hence the in.available() check with Long.MIN_VALUE as fallback.
private static Pair<CommitLogPosition, Long> truncationRecordFromBlob(ByteBuffer bytes)
{
try (RebufferingInputStream in = new DataInputBuffer(bytes, true))
{
return Pair.create(CommitLogPosition.serializer.deserialize(in), in.available() > 0 ? in.readLong() : Long.MIN_VALUE);
}
catch (IOException e)
{
// Reading from an in-memory buffer should not fail; surface as unchecked.
throw new RuntimeException(e);
}
}
/**
 * Asynchronously records a remote peer's tokens in system.peers. No-op (an
 * immediately-completed future) for our own address: local tokens are stored
 * in the local table instead.
 */
public static Future<?> updateTokens(final InetAddress ep, final Collection<Token> tokens, ExecutorService executorService)
{
    if (FBUtilities.getBroadcastAddress().equals(ep))
        return Futures.immediateFuture(null);
    final String insert = String.format("INSERT INTO system.%s (peer, tokens) VALUES (?, ?)", PEERS);
    return executorService.submit((Runnable) () -> executeInternal(insert, ep, tokensAsSet(tokens)));
}
/**
 * Stores the preferred (e.g. private/internal) address to use when contacting
 * a peer, flushing so the preference survives a restart.
 */
public static void updatePreferredIP(InetAddress ep, InetAddress preferred_ip)
{
    String insert = String.format("INSERT INTO system.%s (peer, preferred_ip) VALUES (?, ?)", PEERS);
    executeInternal(insert, ep, preferred_ip);
    forceBlockingFlush(PEERS);
}
/**
 * Asynchronously writes a single column of a peer's row in system.peers.
 * No-op for our own address. Note: columnName is interpolated into the CQL,
 * so callers must pass a known column name, never user input.
 */
public static Future<?> updatePeerInfo(final InetAddress ep, final String columnName, final Object value, ExecutorService executorService)
{
    if (FBUtilities.getBroadcastAddress().equals(ep))
        return Futures.immediateFuture(null);
    final String insert = String.format("INSERT INTO system.%s (peer, %s) VALUES (?, ?)", PEERS, columnName);
    return executorService.submit((Runnable) () -> executeInternal(insert, ep, value));
}
/**
 * Asynchronously records a peer's release_version, then runs postUpdateTask on
 * the same executor thread after the write. No-op for our own address (local
 * version lives in the local table), in which case postUpdateTask is not run.
 */
public static void updatePeerReleaseVersion(final InetAddress ep, final Object value, Runnable postUpdateTask, ExecutorService executorService)
{
    if (FBUtilities.getBroadcastAddress().equals(ep))
        return;
    final String insert = String.format("INSERT INTO system.%s (peer, release_version) VALUES (?, ?)", PEERS);
    executorService.execute(() ->
    {
        executeInternal(insert, ep, value);
        postUpdateTask.run();
    });
}
/**
 * Records how many hints were dropped for a peer in the given time period.
 * Entries carry a 30-day TTL (2592000 s) so old counters age out on their own.
 */
public static synchronized void updateHintsDropped(InetAddress ep, UUID timePeriod, int value)
{
    String update = String.format("UPDATE system.%s USING TTL 2592000 SET hints_dropped[ ? ] = ? WHERE peer = ?", PEER_EVENTS);
    executeInternal(update, timePeriod, value, ep);
}
/**
 * Persists the local node's current schema version into the local table.
 */
public static synchronized void updateSchemaVersion(UUID version)
{
    String insert = String.format("INSERT INTO system.%s (key, schema_version) VALUES ('%s', ?)", LOCAL, LOCAL);
    executeInternal(insert, version);
}
/**
 * Serializes tokens to their string form for storage in a set<varchar> column.
 */
private static Set<String> tokensAsSet(Collection<Token> tokens)
{
    if (tokens.isEmpty())
        return Collections.emptySet();
    Token.TokenFactory factory = StorageService.instance.getTokenFactory();
    Set<String> serialized = new HashSet<>(tokens.size());
    tokens.forEach(token -> serialized.add(factory.toString(token)));
    return serialized;
}
/**
 * Inverse of tokensAsSet: parses stored token strings back into Token objects.
 */
private static Collection<Token> deserializeTokens(Collection<String> tokensStrings)
{
    Token.TokenFactory factory = StorageService.instance.getTokenFactory();
    List<Token> parsed = new ArrayList<>(tokensStrings.size());
    tokensStrings.forEach(serialized -> parsed.add(factory.fromString(serialized)));
    return parsed;
}
/**
 * Deletes everything we know about a peer and flushes so the removal sticks.
 */
public static void removeEndpoint(InetAddress ep)
{
    String delete = String.format("DELETE FROM system.%s WHERE peer = ?", PEERS);
    executeInternal(delete, ep);
    forceBlockingFlush(PEERS);
}
/**
 * Persists this node's own tokens into the local table and flushes. Callers
 * must not pass an empty collection — use removeEndpoint for removal.
 */
public static synchronized void updateTokens(Collection<Token> tokens)
{
    assert !tokens.isEmpty() : "removeEndpoint should be used instead";
    String insert = String.format("INSERT INTO system.%s (key, tokens) VALUES ('%s', ?)", LOCAL, LOCAL);
    executeInternal(insert, tokensAsSet(tokens));
    forceBlockingFlush(LOCAL);
}
// Synchronously flushes a system table so just-written data is durable, unless the
// "unsafe system" flag is set (used to skip flushes, e.g. in tests).
public static void forceBlockingFlush(String cfname)
{
if (!DatabaseDescriptor.isUnsafeSystem())
FBUtilities.waitOnFuture(Keyspace.open(SchemaConstants.SYSTEM_KEYSPACE_NAME).getColumnFamilyStore(cfname).forceFlush());
}
/**
 * Loads every peer's saved tokens from system.peers; peers with no tokens
 * column are skipped.
 */
public static SetMultimap<InetAddress, Token> loadTokens()
{
    SetMultimap<InetAddress, Token> tokensByPeer = HashMultimap.create();
    for (UntypedResultSet.Row row : executeInternal("SELECT peer, tokens FROM system." + PEERS))
    {
        if (!row.has("tokens"))
            continue;
        InetAddress peer = row.getInetAddress("peer");
        tokensByPeer.putAll(peer, deserializeTokens(row.getSet("tokens", UTF8Type.instance)));
    }
    return tokensByPeer;
}
/**
 * Loads every peer's host id from system.peers; peers with no host_id column
 * are skipped.
 */
public static Map<InetAddress, UUID> loadHostIds()
{
    Map<InetAddress, UUID> idsByPeer = new HashMap<>();
    for (UntypedResultSet.Row row : executeInternal("SELECT peer, host_id FROM system." + PEERS))
    {
        if (row.has("host_id"))
            idsByPeer.put(row.getInetAddress("peer"), row.getUUID("host_id"));
    }
    return idsByPeer;
}
/**
 * Loads the release version of every known peer. Peers with a missing or
 * unparseable version string are mapped to the NULL_VERSION sentinel (and the
 * condition is logged) rather than omitted.
 */
public static Map<InetAddress, CassandraVersion> loadPeerVersions()
{
    Map<InetAddress, CassandraVersion> versionsByPeer = new HashMap<>();
    for (UntypedResultSet.Row row : executeInternal("SELECT peer, release_version FROM system." + PEERS))
    {
        InetAddress peer = row.getInetAddress("peer");
        if (!row.has("release_version"))
        {
            logger.info("No version string found for {}", peer);
            versionsByPeer.put(peer, NULL_VERSION);
            continue;
        }
        try
        {
            versionsByPeer.put(peer, new CassandraVersion(row.getString("release_version")));
        }
        catch (IllegalArgumentException e)
        {
            logger.info("Invalid version string found for {}", peer);
            versionsByPeer.put(peer, NULL_VERSION);
        }
    }
    return versionsByPeer;
}
/**
 * Returns the preferred address recorded for a peer, falling back to the
 * peer's own address when none is stored.
 */
public static InetAddress getPreferredIP(InetAddress ep)
{
    String query = String.format("SELECT preferred_ip FROM system.%s WHERE peer=?", PEERS);
    UntypedResultSet rows = executeInternal(query, ep);
    if (rows.isEmpty())
        return ep;
    UntypedResultSet.Row row = rows.one();
    return row.has("preferred_ip") ? row.getInetAddress("preferred_ip") : ep;
}
/**
 * Loads each peer's datacenter/rack placement as a two-entry map keyed by
 * "data_center" and "rack". Peers missing either column are skipped entirely.
 */
public static Map<InetAddress, Map<String,String>> loadDcRackInfo()
{
    Map<InetAddress, Map<String, String>> placementByPeer = new HashMap<>();
    for (UntypedResultSet.Row row : executeInternal("SELECT peer, data_center, rack from system." + PEERS))
    {
        if (!row.has("data_center") || !row.has("rack"))
            continue;
        Map<String, String> placement = new HashMap<>();
        placement.put("data_center", row.getString("data_center"));
        placement.put("rack", row.getString("rack"));
        placementByPeer.put(row.getInetAddress("peer"), placement);
    }
    return placementByPeer;
}
/**
 * Returns the release version of the given endpoint: our own version for the
 * local address, the version recorded in system.peers for a remote peer, or
 * null when the peer is unknown or its version string cannot be parsed.
 */
public static CassandraVersion getReleaseVersion(InetAddress ep)
{
    try
    {
        if (FBUtilities.getBroadcastAddress().equals(ep))
        {
            return new CassandraVersion(FBUtilities.getReleaseVersionString());
        }
        String req = "SELECT release_version FROM system.%s WHERE peer=?";
        UntypedResultSet result = executeInternal(String.format(req, PEERS), ep);
        // Fixed: guard against an empty result set before calling one() —
        // previously an unknown peer made one() fail instead of returning null.
        if (result != null && !result.isEmpty() && result.one().has("release_version"))
        {
            return new CassandraVersion(result.one().getString("release_version"));
        }
        return null;
    }
    catch (IllegalArgumentException e)
    {
        // Unparseable version string: treat as unknown.
        return null;
    }
}
// Startup sanity check: verifies the system keyspace is readable and that the
// cluster_name stored in the local table matches the configured one, to prevent a
// node from joining with data that belongs to a different cluster.
// @throws ConfigurationException on unreadable system keyspace, unloadable
//         sstables, or a cluster-name mismatch
public static void checkHealth() throws ConfigurationException
{
Keyspace keyspace;
try
{
keyspace = Keyspace.open(SchemaConstants.SYSTEM_KEYSPACE_NAME);
}
catch (AssertionError err)
{
// Wrap the AssertionError so callers get a ConfigurationException with cause attached.
ConfigurationException ex = new ConfigurationException("Could not read system keyspace!");
ex.initCause(err);
throw ex;
}
ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(LOCAL);
String req = "SELECT cluster_name FROM system.%s WHERE key='%s'";
UntypedResultSet result = executeInternal(String.format(req, LOCAL, LOCAL));
if (result.isEmpty() || !result.one().has("cluster_name"))
{
// No cluster_name but sstables exist on disk: data is present yet unreadable.
if (!cfs.getLiveSSTables().isEmpty())
throw new ConfigurationException("Found system keyspace files, but they couldn't be loaded!");
// Fresh node: nothing to validate yet.
return;
}
String savedClusterName = result.one().getString("cluster_name");
if (!DatabaseDescriptor.getClusterName().equals(savedClusterName))
throw new ConfigurationException("Saved cluster name " + savedClusterName + " != configured name " + DatabaseDescriptor.getClusterName());
}
/**
 * Returns the tokens this node persisted in the local table, or an empty list
 * when none were saved.
 */
public static Collection<Token> getSavedTokens()
{
    String query = String.format("SELECT tokens FROM system.%s WHERE key='%s'", LOCAL, LOCAL);
    UntypedResultSet result = executeInternal(query);
    if (result.isEmpty() || !result.one().has("tokens"))
        return Collections.<Token>emptyList();
    return deserializeTokens(result.one().getSet("tokens", UTF8Type.instance));
}
// Computes the gossip generation for this startup, persists it, and returns it.
// Normally the generation is the current epoch seconds; if the previously stored
// generation + 1 is already >= now (e.g. clock moved backwards, or rapid restarts),
// the stored value + 1 is used instead so the generation never goes backwards.
public static int incrementAndGetGeneration()
{
String req = "SELECT gossip_generation FROM system.%s WHERE key='%s'";
UntypedResultSet result = executeInternal(String.format(req, LOCAL, LOCAL));
int generation;
if (result.isEmpty() || !result.one().has("gossip_generation"))
{
// First startup: seed the generation from the wall clock.
generation = (int) (System.currentTimeMillis() / 1000);
}
else
{
final int storedGeneration = result.one().getInt("gossip_generation") + 1;
final int now = (int) (System.currentTimeMillis() / 1000);
if (storedGeneration >= now)
{
logger.warn("Using stored Gossip Generation {} as it is greater than current system time {}. See CASSANDRA-3654 if you experience problems",
storedGeneration, now);
generation = storedGeneration;
}
else
{
generation = now;
}
}
// Persist and flush so the next restart sees this generation.
req = "INSERT INTO system.%s (key, gossip_generation) VALUES ('%s', ?)";
executeInternal(String.format(req, LOCAL, LOCAL), generation);
forceBlockingFlush(LOCAL);
return generation;
}
/**
 * Returns the persisted bootstrap state, defaulting to NEEDS_BOOTSTRAP when
 * nothing has been recorded yet.
 */
public static BootstrapState getBootstrapState()
{
    String query = String.format("SELECT bootstrapped FROM system.%s WHERE key='%s'", LOCAL, LOCAL);
    UntypedResultSet result = executeInternal(query);
    if (result.isEmpty() || !result.one().has("bootstrapped"))
        return BootstrapState.NEEDS_BOOTSTRAP;
    return BootstrapState.valueOf(result.one().getString("bootstrapped"));
}
/** True once this node has finished bootstrapping. */
public static boolean bootstrapComplete()
{
    return BootstrapState.COMPLETED == getBootstrapState();
}
/** True while a bootstrap is underway on this node. */
public static boolean bootstrapInProgress()
{
    return BootstrapState.IN_PROGRESS == getBootstrapState();
}
/** True if this node was previously decommissioned from the cluster. */
public static boolean wasDecommissioned()
{
    return BootstrapState.DECOMMISSIONED == getBootstrapState();
}
/**
 * Persists the bootstrap state (by enum name) and flushes so it survives a
 * restart.
 */
public static void setBootstrapState(BootstrapState state)
{
    String insert = String.format("INSERT INTO system.%s (key, bootstrapped) VALUES ('%s', ?)", LOCAL, LOCAL);
    executeInternal(insert, state.name());
    forceBlockingFlush(LOCAL);
}
/**
 * An index counts as built iff a row for it exists in system."IndexInfo".
 */
public static boolean isIndexBuilt(String keyspaceName, String indexName)
{
    String query = String.format("SELECT index_name FROM %s.\"%s\" WHERE table_name=? AND index_name=?",
                                 SchemaConstants.SYSTEM_KEYSPACE_NAME, BUILT_INDEXES);
    return !executeInternal(query, keyspaceName, indexName).isEmpty();
}
/**
 * Marks an index as built and flushes the marker table.
 */
public static void setIndexBuilt(String keyspaceName, String indexName)
{
    String insert = String.format("INSERT INTO %s.\"%s\" (table_name, index_name) VALUES (?, ?)",
                                  SchemaConstants.SYSTEM_KEYSPACE_NAME, BUILT_INDEXES);
    executeInternal(insert, keyspaceName, indexName);
    forceBlockingFlush(BUILT_INDEXES);
}
/**
 * Removes an index's built marker and flushes the marker table.
 */
public static void setIndexRemoved(String keyspaceName, String indexName)
{
    String delete = String.format("DELETE FROM %s.\"%s\" WHERE table_name = ? AND index_name = ?",
                                  SchemaConstants.SYSTEM_KEYSPACE_NAME, BUILT_INDEXES);
    executeInternal(delete, keyspaceName, indexName);
    forceBlockingFlush(BUILT_INDEXES);
}
/**
 * Filters the given index names down to those recorded as built for the keyspace.
 *
 * @param keyspaceName keyspace the indexes belong to
 * @param indexNames   candidate index names to check
 * @return the subset of {@code indexNames} present in the built-indexes table
 */
public static List<String> getBuiltIndexes(String keyspaceName, Set<String> indexNames)
{
    String query = String.format("SELECT index_name from %s.\"%s\" WHERE table_name=? AND index_name IN ?",
                                 SchemaConstants.SYSTEM_KEYSPACE_NAME, BUILT_INDEXES);
    // the IN bind expects a list, so materialize the set first
    UntypedResultSet rows = executeInternal(query, keyspaceName, new ArrayList<>(indexNames));
    List<String> built = new ArrayList<>();
    for (UntypedResultSet.Row row : rows)
        built.add(row.getString("index_name"));
    return built;
}
/**
 * Returns this node's host ID from the local system table, generating and
 * persisting a fresh random one if none has been recorded yet.
 */
public static UUID getLocalHostId()
{
    String query = String.format("SELECT host_id FROM system.%s WHERE key='%s'", LOCAL, LOCAL);
    UntypedResultSet rs = executeInternal(query);
    if (!rs.isEmpty())
    {
        UntypedResultSet.Row row = rs.one();
        if (row.has("host_id"))
            return row.getUUID("host_id");
    }
    // first startup for this node: mint an ID and store it
    UUID generated = UUID.randomUUID();
    logger.warn("No host ID found, created {} (Note: This should happen exactly once per node).", generated);
    return setLocalHostId(generated);
}
/**
 * Persists the given host ID into the local system table.
 *
 * @return the same {@code hostId}, for caller convenience
 */
public static UUID setLocalHostId(UUID hostId)
{
    String query = String.format("INSERT INTO system.%s (key, host_id) VALUES ('%s', ?)", LOCAL, LOCAL);
    executeInternal(query, hostId);
    return hostId;
}
/**
 * Reads the rack recorded for this node in the local system table.
 *
 * @return the stored rack name, or null if none has been recorded
 */
public static String getRack()
{
    String query = String.format("SELECT rack FROM system.%s WHERE key='%s'", LOCAL, LOCAL);
    UntypedResultSet rs = executeInternal(query);
    if (rs.isEmpty())
        return null;
    UntypedResultSet.Row row = rs.one();
    return row.has("rack") ? row.getString("rack") : null;
}
/**
 * Reads the datacenter recorded for this node in the local system table.
 *
 * @return the stored datacenter name, or null if none has been recorded
 */
public static String getDatacenter()
{
    String query = String.format("SELECT data_center FROM system.%s WHERE key='%s'", LOCAL, LOCAL);
    UntypedResultSet rs = executeInternal(query);
    if (rs.isEmpty())
        return null;
    UntypedResultSet.Row row = rs.one();
    return row.has("data_center") ? row.getString("data_center") : null;
}
/**
 * Loads the paxos state (promised ballot, accepted proposal, most recent commit)
 * for a partition key and table from the system.paxos table.
 *
 * @param key      partition key whose paxos state is requested
 * @param metadata table the key belongs to
 * @param nowInSec query timestamp in seconds, used for TTL expiry evaluation
 * @return a fully-populated PaxosState; empty commits are substituted for any
 *         component not present in the stored row
 */
public static PaxosState loadPaxosState(DecoratedKey key, CFMetaData metadata, int nowInSec)
{
String req = "SELECT * FROM system.%s WHERE row_key = ? AND cf_id = ?";
UntypedResultSet results = QueryProcessor.executeInternalWithNow(nowInSec, System.nanoTime(), String.format(req, PAXOS), key.getKey(), metadata.cfId);
// no stored row at all: fresh state for this key/table
if (results.isEmpty())
return new PaxosState(key, metadata);
UntypedResultSet.Row row = results.one();
// the promise carries only a ballot; an empty update stands in for the payload
Commit promised = row.has("in_progress_ballot")
? new Commit(row.getUUID("in_progress_ballot"), new PartitionUpdate(metadata, key, metadata.partitionColumns(), 1))
: Commit.emptyCommit(key, metadata);
// rows written before the version column existed default to the 2.1 serialization format
int proposalVersion = row.has("proposal_version") ? row.getInt("proposal_version") : MessagingService.VERSION_21;
Commit accepted = row.has("proposal")
? new Commit(row.getUUID("proposal_ballot"), PartitionUpdate.fromBytes(row.getBytes("proposal"), proposalVersion, key))
: Commit.emptyCommit(key, metadata);
int mostRecentVersion = row.has("most_recent_commit_version") ? row.getInt("most_recent_commit_version") : MessagingService.VERSION_21;
Commit mostRecent = row.has("most_recent_commit")
? new Commit(row.getUUID("most_recent_commit_at"), PartitionUpdate.fromBytes(row.getBytes("most_recent_commit"), mostRecentVersion, key))
: Commit.emptyCommit(key, metadata);
return new PaxosState(promised, accepted, mostRecent);
}
/**
 * Records a paxos promise (in-progress ballot) for the promise's partition,
 * using the ballot's timestamp and a TTL derived from the table's gc grace.
 */
public static void savePaxosPromise(Commit promise)
{
    CFMetaData table = promise.update.metadata();
    executeInternal(String.format("UPDATE system.%s USING TIMESTAMP ? AND TTL ? SET in_progress_ballot = ? WHERE row_key = ? AND cf_id = ?", PAXOS),
                    UUIDGen.microsTimestamp(promise.ballot),
                    paxosTtlSec(table),
                    promise.ballot,
                    promise.update.partitionKey().getKey(),
                    table.cfId);
}
/**
 * Records an accepted paxos proposal (ballot plus serialized update) for the
 * proposal's partition, using the ballot's timestamp and the table's paxos TTL.
 */
public static void savePaxosProposal(Commit proposal)
{
    String cql = "UPDATE system.%s USING TIMESTAMP ? AND TTL ? SET proposal_ballot = ?, proposal = ?, proposal_version = ? WHERE row_key = ? AND cf_id = ?";
    CFMetaData table = proposal.update.metadata();
    executeInternal(String.format(cql, PAXOS),
                    UUIDGen.microsTimestamp(proposal.ballot),
                    paxosTtlSec(table),
                    proposal.ballot,
                    PartitionUpdate.toBytes(proposal.update, MessagingService.current_version),
                    MessagingService.current_version,
                    proposal.update.partitionKey().getKey(),
                    table.cfId);
}
/**
 * TTL (seconds) applied to rows in the system.paxos table: at least three
 * hours, but never shorter than the table's gc_grace_seconds.
 */
public static int paxosTtlSec(CFMetaData metadata)
{
    final int minimumTtlSec = 3 * 3600; // three hours
    return Math.max(metadata.params.gcGraceSeconds, minimumTtlSec);
}
/**
 * Records a paxos commit for the commit's partition, clearing the accepted
 * proposal columns in the same update. Timestamp comes from the ballot and
 * the TTL from the table's paxos TTL.
 */
public static void savePaxosCommit(Commit commit)
{
    CFMetaData table = commit.update.metadata();
    executeInternal(String.format("UPDATE system.%s USING TIMESTAMP ? AND TTL ? SET proposal_ballot = null, proposal = null, most_recent_commit_at = ?, most_recent_commit = ?, most_recent_commit_version = ? WHERE row_key = ? AND cf_id = ?", PAXOS),
                    UUIDGen.microsTimestamp(commit.ballot),
                    paxosTtlSec(table),
                    commit.ballot,
                    PartitionUpdate.toBytes(commit.update, MessagingService.current_version),
                    MessagingService.current_version,
                    commit.update.partitionKey().getKey(),
                    table.cfId);
}
/**
 * Restores the persisted read-rate meter for an sstable.
 *
 * @return a meter seeded with the stored 15-minute and 2-hour rates, or a
 *         fresh meter when no activity row exists for this sstable
 */
public static RestorableMeter getSSTableReadMeter(String keyspace, String table, int generation)
{
    String query = String.format("SELECT * FROM system.%s WHERE keyspace_name=? and columnfamily_name=? and generation=?", SSTABLE_ACTIVITY);
    UntypedResultSet rs = executeInternal(query, keyspace, table, generation);
    if (rs.isEmpty())
        return new RestorableMeter();
    UntypedResultSet.Row row = rs.one();
    return new RestorableMeter(row.getDouble("rate_15m"), row.getDouble("rate_120m"));
}
/**
 * Persists an sstable's read-rate meter so it can be restored after restart.
 * Rows carry a 10-day TTL so stale entries expire on their own.
 */
public static void persistSSTableReadMeter(String keyspace, String table, int generation, RestorableMeter meter)
{
    String query = String.format("INSERT INTO system.%s (keyspace_name, columnfamily_name, generation, rate_15m, rate_120m) VALUES (?, ?, ?, ?, ?) USING TTL 864000", SSTABLE_ACTIVITY);
    executeInternal(query, keyspace, table, generation, meter.fifteenMinuteRate(), meter.twoHourRate());
}
/** Deletes the persisted read-rate meter row for the given sstable. */
public static void clearSSTableReadMeter(String keyspace, String table, int generation)
{
    String query = String.format("DELETE FROM system.%s WHERE keyspace_name=? AND columnfamily_name=? and generation=?", SSTABLE_ACTIVITY);
    executeInternal(query, keyspace, table, generation);
}
/**
 * Replaces the stored size estimates for one table with the given per-range values.
 *
 * @param keyspace  keyspace the estimates belong to
 * @param table     table the estimates belong to
 * @param estimates per token-range pair of (partition count, mean partition size)
 */
public static void updateSizeEstimates(String keyspace, String table, Map<Range<Token>, Pair<Long, Long>> estimates)
{
long timestamp = FBUtilities.timestampMicros();
PartitionUpdate update = new PartitionUpdate(SizeEstimates, UTF8Type.instance.decompose(keyspace), SizeEstimates.partitionColumns(), estimates.size());
Mutation mutation = new Mutation(update);
int nowInSec = FBUtilities.nowInSeconds();
// delete all previous rows for this table first (tombstone at timestamp - 1 so
// the new rows written at `timestamp` below survive it)
update.add(new RangeTombstone(Slice.make(SizeEstimates.comparator, table), new DeletionTime(timestamp - 1, nowInSec)));
for (Map.Entry<Range<Token>, Pair<Long, Long>> entry : estimates.entrySet())
{
Range<Token> range = entry.getKey();
Pair<Long, Long> values = entry.getValue();
// one row per token range, keyed by the range's string-form endpoints
update.add(Rows.simpleBuilder(SizeEstimates, table, range.left.toString(), range.right.toString())
.timestamp(timestamp)
.add("partitions_count", values.left)
.add("mean_partition_size", values.right)
.build());
}
mutation.apply();
}
/** Deletes the stored size estimates for a single table. */
public static void clearSizeEstimates(String keyspace, String table)
{
    executeInternal(String.format("DELETE FROM %s.%s WHERE keyspace_name = ? AND table_name = ?", SchemaConstants.SYSTEM_KEYSPACE_NAME, SIZE_ESTIMATES),
                    keyspace, table);
}
/** Deletes the stored size estimates for every table in a keyspace. */
public static void clearSizeEstimates(String keyspace)
{
    executeInternal(String.format("DELETE FROM %s.%s WHERE keyspace_name = ?", SchemaConstants.SYSTEM_KEYSPACE_NAME, SIZE_ESTIMATES),
                    keyspace);
}
/**
 * Lists every (keyspace, table) pair that currently has stored size estimates.
 *
 * @return a multimap from keyspace name to the table names with estimates
 */
public static synchronized SetMultimap<String, String> getTablesWithSizeEstimates()
{
    String query = String.format("SELECT keyspace_name, table_name FROM %s.%s", SchemaConstants.SYSTEM_KEYSPACE_NAME, SIZE_ESTIMATES);
    SetMultimap<String, String> tablesByKeyspace = HashMultimap.create();
    for (UntypedResultSet.Row row : executeInternal(query))
        tablesByKeyspace.put(row.getString("keyspace_name"), row.getString("table_name"));
    return tablesByKeyspace;
}
/**
 * Adds the given completed token ranges to the set stored for the keyspace
 * in the available-ranges table.
 */
public static synchronized void updateAvailableRanges(String keyspace, Collection<Range<Token>> completedRanges)
{
    Set<ByteBuffer> serialized = new HashSet<>(completedRanges.size());
    for (Range<Token> completed : completedRanges)
        serialized.add(rangeToBytes(completed));
    executeInternal(String.format("UPDATE system.%s SET ranges = ranges + ? WHERE keyspace_name = ?", AVAILABLE_RANGES),
                    serialized, keyspace);
}
/**
 * Loads the token ranges recorded as available for a keyspace.
 *
 * @param partitioner partitioner used to deserialize the stored tokens
 * @return an immutable set of the deserialized ranges (empty if none stored)
 */
public static synchronized Set<Range<Token>> getAvailableRanges(String keyspace, IPartitioner partitioner)
{
    String query = String.format("SELECT * FROM system.%s WHERE keyspace_name=?", AVAILABLE_RANGES);
    Set<Range<Token>> ranges = new HashSet<>();
    for (UntypedResultSet.Row row : executeInternal(query, keyspace))
        for (ByteBuffer serialized : row.getSet("ranges", BytesType.instance))
            ranges.add(byteBufferToRange(serialized, partitioner));
    return ImmutableSet.copyOf(ranges);
}
/** Truncates the available-ranges table, discarding all recorded ranges. */
public static void resetAvailableRanges()
{
    Keyspace.open(SchemaConstants.SYSTEM_KEYSPACE_NAME)
            .getColumnFamilyStore(AVAILABLE_RANGES)
            .truncateBlocking();
}
/**
 * Adds the given streamed token ranges to the set stored for
 * (operation, peer, keyspace) in the transferred-ranges table.
 *
 * @param description the streaming operation name
 * @param peer        the endpoint the ranges were streamed to/from
 */
public static synchronized void updateTransferredRanges(String description,
                                                        InetAddress peer,
                                                        String keyspace,
                                                        Collection<Range<Token>> streamedRanges)
{
    Set<ByteBuffer> serialized = new HashSet<>(streamedRanges.size());
    for (Range<Token> streamed : streamedRanges)
        serialized.add(rangeToBytes(streamed));
    executeInternal(String.format("UPDATE system.%s SET ranges = ranges + ? WHERE operation = ? AND peer = ? AND keyspace_name = ?", TRANSFERRED_RANGES),
                    serialized, description, peer, keyspace);
}
/**
 * Loads the token ranges recorded as transferred for a streaming operation,
 * grouped by peer endpoint.
 *
 * @param description the streaming operation name
 * @param partitioner partitioner used to deserialize the stored tokens
 * @return an immutable map of peer to its transferred ranges
 */
public static synchronized Map<InetAddress, Set<Range<Token>>> getTransferredRanges(String description, String keyspace, IPartitioner partitioner)
{
    String query = String.format("SELECT * FROM system.%s WHERE operation = ? AND keyspace_name = ?", TRANSFERRED_RANGES);
    Map<InetAddress, Set<Range<Token>>> rangesByPeer = new HashMap<>();
    for (UntypedResultSet.Row row : executeInternal(query, description, keyspace))
    {
        Set<ByteBuffer> serialized = row.getSet("ranges", BytesType.instance);
        Set<Range<Token>> deserialized = Sets.newHashSetWithExpectedSize(serialized.size());
        for (ByteBuffer raw : serialized)
            deserialized.add(byteBufferToRange(raw, partitioner));
        rangesByPeer.put(row.getInetAddress("peer"), deserialized);
    }
    return ImmutableMap.copyOf(rangesByPeer);
}
/**
 * Snapshots the system keyspace when a release-version upgrade is detected.
 *
 * @return true if a snapshot was taken (version changed since last startup)
 * @throws IOException if snapshotting fails
 */
public static boolean snapshotOnVersionChange() throws IOException
{
    String previous = getPreviousVersionString();
    String next = FBUtilities.getReleaseVersionString();
    // nothing to do on a fresh node or when the version is unchanged
    if (previous.equals(NULL_VERSION.toString()) || previous.equals(next))
        return false;
    logger.info("Detected version upgrade from {} to {}, snapshotting system keyspace", previous, next);
    String snapshotName = Keyspace.getTimestampedSnapshotName(String.format("upgrade-%s-%s", previous, next));
    Keyspace.open(SchemaConstants.SYSTEM_KEYSPACE_NAME).snapshot(snapshotName, null);
    return true;
}
/**
 * Determines the release version this node ran before the current startup.
 *
 * @return the stored release version; {@code UNREADABLE_VERSION} if only a
 *         pre-1.2 "Versions" directory with files is found; {@code NULL_VERSION}
 *         if no prior version information exists (e.g. a fresh node)
 */
private static String getPreviousVersionString()
{
    String req = "SELECT release_version FROM system.%s WHERE key='%s'";
    UntypedResultSet result = executeInternal(String.format(req, SystemKeyspace.LOCAL, SystemKeyspace.LOCAL));
    if (result.isEmpty() || !result.one().has("release_version"))
    {
        // no version row: either a fresh node, or an upgrade from a pre-1.2
        // install that kept its version info in the old system.Versions table
        for (File dataDirectory : Directories.getKSChildDirectories(SchemaConstants.SYSTEM_KEYSPACE_NAME))
        {
            // File.listFiles() returns null on I/O error or if the path is not
            // a directory; guard against NPE before checking for contents
            File[] children = dataDirectory.listFiles();
            if (dataDirectory.getName().equals("Versions") && children != null && children.length > 0)
            {
                logger.trace("Found unreadable versions info in pre 1.2 system.Versions table");
                return UNREADABLE_VERSION.toString();
            }
        }
        return NULL_VERSION.toString();
    }
    return result.one().getString("release_version");
}
/**
 * Walks every configured data directory and removes legacy (pre-current-format)
 * files and directories left behind by older Cassandra versions: whole table
 * directories that are legacy are deleted recursively, otherwise only the
 * legacy files inside them are deleted.
 */
public static void migrateDataDirs()
{
Iterable<String> dirs = Arrays.asList(DatabaseDescriptor.getAllDataFileLocations());
for (String dataDir : dirs)
{
logger.debug("Checking {} for legacy files", dataDir);
File dir = new File(dataDir);
assert dir.exists() : dir + " should have been created by startup checks";
// level 1: keyspace directories under the data directory
visitDirectory(dir.toPath(),
File::isDirectory,
ksdir ->
{
logger.trace("Checking {} for legacy files", ksdir);
// level 2: table (column family) directories under each keyspace
visitDirectory(ksdir.toPath(),
File::isDirectory,
cfdir ->
{
logger.trace("Checking {} for legacy files", cfdir);
if (Descriptor.isLegacyFile(cfdir))
{
// entire table directory is in the legacy layout: drop it wholesale
FileUtils.deleteRecursive(cfdir);
}
else
{
// level 3: delete only the individual legacy files inside the table dir
visitDirectory(cfdir.toPath(),
Descriptor::isLegacyFile,
FileUtils::delete);
}
});
});
}
}
/**
 * Serializes a token range with the 2.2 wire format, which is the fixed
 * on-disk format used by the available/transferred ranges tables.
 *
 * @throws IOError if serialization fails (should not happen for an in-memory buffer)
 */
private static ByteBuffer rangeToBytes(Range<Token> range)
{
    try (DataOutputBuffer buffer = new DataOutputBuffer())
    {
        Range.tokenSerializer.serialize(range, buffer, MessagingService.VERSION_22);
        return buffer.buffer();
    }
    catch (IOException e)
    {
        throw new IOError(e);
    }
}
@SuppressWarnings("unchecked")
private static Range<Token> byteBufferToRange(ByteBuffer rawRange, IPartitioner partitioner)
{
try
{
return (Range<Token>) Range.tokenSerializer.deserialize(ByteStreams.newDataInput(ByteBufferUtil.getArray(rawRange)),
partitioner,
MessagingService.VERSION_22);
}
catch (IOException e)
{
throw new IOError(e);
}
}
/**
 * Persists a prepared statement so it can be re-prepared after restart.
 *
 * @param loggedKeyspace keyspace the statement was prepared against (may be null)
 * @param key            MD5 digest identifying the prepared statement
 * @param cql            the original query string
 */
public static void writePreparedStatement(String loggedKeyspace, MD5Digest key, String cql)
{
    String query = String.format("INSERT INTO %s.%s"
                                 + " (logged_keyspace, prepared_id, query_string) VALUES (?, ?, ?)",
                                 SchemaConstants.SYSTEM_KEYSPACE_NAME, PREPARED_STATEMENTS);
    executeInternal(query, loggedKeyspace, key.byteBuffer(), cql);
    logger.debug("stored prepared statement for logged keyspace '{}': '{}'", loggedKeyspace, cql);
}
/** Deletes the persisted prepared statement identified by the given digest. */
public static void removePreparedStatement(MD5Digest key)
{
    String query = String.format("DELETE FROM %s.%s"
                                 + " WHERE prepared_id = ?",
                                 SchemaConstants.SYSTEM_KEYSPACE_NAME, PREPARED_STATEMENTS);
    executeInternal(query, key.byteBuffer());
}
/** Truncates the prepared-statements table, discarding all persisted statements. */
public static void resetPreparedStatements()
{
    // renamed from the copy-pasted "availableRanges": this is the prepared-statements CFS
    ColumnFamilyStore preparedStatements = Keyspace.open(SchemaConstants.SYSTEM_KEYSPACE_NAME).getColumnFamilyStore(PREPARED_STATEMENTS);
    preparedStatements.truncateBlocking();
}
/**
 * Loads all persisted prepared statements.
 *
 * @return pairs of (logged keyspace or null, query string), one per statement
 */
public static List<Pair<String, String>> loadPreparedStatements()
{
    String query = String.format("SELECT logged_keyspace, query_string FROM %s.%s", SchemaConstants.SYSTEM_KEYSPACE_NAME, PREPARED_STATEMENTS);
    List<Pair<String, String>> statements = new ArrayList<>();
    for (UntypedResultSet.Row row : executeOnceInternal(query))
    {
        // logged_keyspace is optional; statements prepared without USE have none
        String loggedKeyspace = row.has("logged_keyspace") ? row.getString("logged_keyspace") : null;
        statements.add(Pair.create(loggedKeyspace, row.getString("query_string")));
    }
    return statements;
}
}