package com.android.internal.backup;
import android.app.backup.BackupAgent;
import android.app.backup.BackupDataInput;
import android.app.backup.BackupDataOutput;
import android.app.backup.BackupTransport;
import android.app.backup.RestoreDescription;
import android.app.backup.RestoreSet;
import android.content.ComponentName;
import android.content.Context;
import android.content.Intent;
import android.content.pm.PackageInfo;
import android.os.Environment;
import android.os.ParcelFileDescriptor;
import android.system.ErrnoException;
import android.system.Os;
import android.system.StructStat;
import android.util.ArrayMap;
import android.util.Log;
import com.android.org.bouncycastle.util.encoders.Base64;
import libcore.io.IoUtils;
import java.io.BufferedOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
public class LocalTransport extends BackupTransport {
// Logging tag.
private static final String TAG = "LocalTransport";
// Compile-time verbose-logging switch; false strips the debug branches.
private static final boolean DEBUG = false;
// Identifier under which the backup manager keys this transport's state.
private static final String TRANSPORT_DIR_NAME
= "com.android.internal.backup.LocalTransport";
// Human-readable destination description for this debug-only transport.
private static final String TRANSPORT_DESTINATION_STRING
= "Backing up to debug-only private cache";
// Data-management label; intentionally empty (no management UI).
private static final String TRANSPORT_DATA_MANAGEMENT_LABEL
= "";
// Per-set subdirectory holding key/value (incremental) records.
private static final String INCREMENTAL_DIR = "_delta";
// Per-set subdirectory holding full-data tarballs.
private static final String FULL_DATA_DIR = "_full";
// Token identifying the live backup set.
private static final long CURRENT_SET_TOKEN = 1;
// Size quotas for this debug transport: 25 MB full-data, 5 MB key/value.
private static final long FULL_BACKUP_SIZE_QUOTA = 25 * 1024 * 1024;
private static final long KEY_VALUE_BACKUP_SIZE_QUOTA = 5 * 1024 * 1024;
private Context mContext;
// All backup data lives under <download-cache>/backup/<set-token>/...
private File mDataDir = new File(Environment.getDownloadCacheDirectory(), "backup");
private File mCurrentSetDir = new File(mDataDir, Long.toString(CURRENT_SET_TOKEN));
private File mCurrentSetIncrementalDir = new File(mCurrentSetDir, INCREMENTAL_DIR);
private File mCurrentSetFullDir = new File(mCurrentSetDir, FULL_DATA_DIR);
// Restore iteration state, populated by startRestore()/nextRestorePackage().
private PackageInfo[] mRestorePackages = null;
private int mRestorePackage = -1;  // index into mRestorePackages
private int mRestoreType;
private File mRestoreSetDir;
private File mRestoreSetIncrementalDir;
private File mRestoreSetFullDir;
// Full-backup streaming state (one package in flight at a time).
private String mFullTargetPackage;
private ParcelFileDescriptor mSocket;
private FileInputStream mSocketInputStream;
private BufferedOutputStream mFullBackupOutputStream;
private byte[] mFullBackupBuffer;
private long mFullBackupSize;
// Full-restore streaming state.
private FileInputStream mCurFullRestoreStream;
private FileOutputStream mFullRestoreSocketStream;
private byte[] mFullRestoreBuffer;
private final LocalTransportParameters mParameters;
/** Ensures the on-disk directory tree for the current backup set exists. */
private void makeDataDirs() {
    // Parent first (mkdirs creates missing ancestors), then the two subdirs.
    mCurrentSetDir.mkdirs();
    for (File dir : new File[] { mCurrentSetFullDir, mCurrentSetIncrementalDir }) {
        dir.mkdir();
    }
}
/**
 * @param context used to derive this transport's component name
 * @param parameters debug knobs (fake-encryption flag, non-incremental-only mode)
 */
public LocalTransport(Context context, LocalTransportParameters parameters) {
mContext = context;
mParameters = parameters;
makeDataDirs();
}
/** Returns the test parameters this transport was constructed with. */
LocalTransportParameters getParameters() {
return mParameters;
}
/** Transport name: this component flattened to a short component string. */
@Override
public String name() {
return new ComponentName(mContext, this.getClass()).flattenToShortString();
}
/** No configuration UI for this debug transport. */
@Override
public Intent configurationIntent() {
return null;
}
/** Fixed, human-readable description of the backup destination. */
@Override
public String currentDestinationString() {
return TRANSPORT_DESTINATION_STRING;
}
/**
 * No data-management UI for this debug transport.
 * Added the missing {@code @Override} for consistency with the other
 * BackupTransport overrides in this class.
 */
@Override
public Intent dataManagementIntent() {
    return null;
}
/**
 * Label for the (nonexistent) data-management UI; intentionally empty.
 * Added the missing {@code @Override} for consistency with the other
 * BackupTransport overrides in this class.
 */
@Override
public String dataManagementLabel() {
    return TRANSPORT_DATA_MANAGEMENT_LABEL;
}
/** Directory name the backup manager keys this transport's state under. */
@Override
public String transportDirName() {
return TRANSPORT_DIR_NAME;
}
/**
 * Returns the framework-default transport flags, additionally advertising
 * fake client-side encryption when the test parameter requests it.
 */
@Override
public int getTransportFlags() {
    final int baseFlags = super.getTransportFlags();
    return mParameters.isFakeEncryptionFlag()
            ? (baseFlags | BackupAgent.FLAG_FAKE_CLIENT_SIDE_ENCRYPTION_ENABLED)
            : baseFlags;
}
/** Backups may run immediately; no backoff requested. */
@Override
public long requestBackupTime() {
return 0;
}
/** Wipes the current backup set and recreates the empty directory tree. */
@Override
public int initializeDevice() {
if (DEBUG) Log.v(TAG, "wiping all data");
deleteContents(mCurrentSetDir);
makeDataDirs();
return TRANSPORT_OK;
}
/**
 * One queued key/value mutation: a base64-encoded key plus its new value,
 * where a null value means "delete this key".
 */
private class KVOperation {
    final String key;
    final byte[] value;

    KVOperation(String base64Key, byte[] newValue) {
        this.key = base64Key;
        this.value = newValue;
    }
}
/** Convenience overload: key/value backup with no transport flags. */
@Override
public int performBackup(PackageInfo packageInfo, ParcelFileDescriptor data) {
return performBackup(packageInfo, data, 0);
}
/**
 * Stores one package's key/value backup payload into the current set.
 *
 * The change set read from {@code data} is applied to the package's
 * on-disk datastore in the incremental directory, subject to the key/value
 * size quota. Honors FLAG_INCREMENTAL / FLAG_NON_INCREMENTAL: an
 * incremental request with no prior state (or in non-incremental-only
 * mode) is answered with TRANSPORT_NON_INCREMENTAL_BACKUP_REQUIRED, and a
 * non-incremental request wipes any existing state first.
 */
@Override
public int performBackup(PackageInfo packageInfo, ParcelFileDescriptor data, int flags) {
boolean isIncremental = (flags & FLAG_INCREMENTAL) != 0;
boolean isNonIncremental = (flags & FLAG_NON_INCREMENTAL) != 0;
if (isIncremental) {
Log.i(TAG, "Performing incremental backup for " + packageInfo.packageName);
} else if (isNonIncremental) {
Log.i(TAG, "Performing non-incremental backup for " + packageInfo.packageName);
} else {
Log.i(TAG, "Performing backup for " + packageInfo.packageName);
}
if (DEBUG) {
try {
StructStat ss = Os.fstat(data.getFileDescriptor());
Log.v(TAG, "performBackup() pkg=" + packageInfo.packageName
+ " size=" + ss.st_size + " flags=" + flags);
} catch (ErrnoException e) {
Log.w(TAG, "Unable to stat input file in performBackup() on "
+ packageInfo.packageName);
}
}
File packageDir = new File(mCurrentSetIncrementalDir, packageInfo.packageName);
// mkdirs() returns false when the directory already exists, which doubles
// as the "do we hold prior state for this package" signal.
boolean hasDataForPackage = !packageDir.mkdirs();
if (isIncremental) {
if (mParameters.isNonIncrementalOnly() || !hasDataForPackage) {
if (mParameters.isNonIncrementalOnly()) {
Log.w(TAG, "Transport is in non-incremental only mode.");
} else {
Log.w(TAG,
"Requested incremental, but transport currently stores no data for the "
+ "package, requesting non-incremental retry.");
}
// Ask the framework to retry this package non-incrementally.
return TRANSPORT_NON_INCREMENTAL_BACKUP_REQUIRED;
}
}
if (isNonIncremental && hasDataForPackage) {
Log.w(TAG, "Requested non-incremental, deleting existing data.");
clearBackupData(packageInfo);
packageDir.mkdirs();
}
// Parse the whole change set up front so quota accounting can run before
// any mutation touches the datastore.
final ArrayList<KVOperation> changeOps;
try {
changeOps = parseBackupStream(data);
} catch (IOException e) {
Log.v(TAG, "Exception reading backup input", e);
return TRANSPORT_ERROR;
}
// Snapshot current per-key sizes to compute the post-apply total.
final ArrayMap<String, Integer> datastore = new ArrayMap<>();
int totalSize = parseKeySizes(packageDir, datastore);
if (DEBUG) {
if (changeOps.size() > 0) {
Log.v(TAG, "Calculating delta size impact");
} else {
Log.v(TAG, "No operations in backup stream, so no size change");
}
}
// Dry-run the ops against the size ledger: subtract replaced or deleted
// entries, add new values.
int updatedSize = totalSize;
for (KVOperation op : changeOps) {
final Integer curSize = datastore.get(op.key);
if (curSize != null) {
updatedSize -= curSize.intValue();
if (DEBUG && op.value == null) {
Log.v(TAG, " delete " + op.key + ", updated total " + updatedSize);
}
}
if (op.value != null) {
updatedSize += op.value.length;
if (DEBUG) {
Log.v(TAG, ((curSize == null) ? " new " : " replace ")
+ op.key + ", updated total " + updatedSize);
}
}
}
// Reject the whole batch if it would push the datastore over quota.
if (updatedSize > KEY_VALUE_BACKUP_SIZE_QUOTA) {
if (DEBUG) {
Log.i(TAG, "New datastore size " + updatedSize
+ " exceeds quota " + KEY_VALUE_BACKUP_SIZE_QUOTA);
}
return TRANSPORT_QUOTA_EXCEEDED;
}
// Commit: each key lives in its own file named by the base64 key; a null
// value means deletion.
for (KVOperation op : changeOps) {
File element = new File(packageDir, op.key);
element.delete();
if (op.value != null) {
try (FileOutputStream out = new FileOutputStream(element)) {
out.write(op.value, 0, op.value.length);
} catch (IOException e) {
Log.e(TAG, "Unable to update key file " + element);
return TRANSPORT_ERROR;
}
}
}
return TRANSPORT_OK;
}
/**
 * Drains the key/value change set from the given pipe into a list of
 * KVOperation records. A negative entity size marks a key deletion and is
 * represented by a null value.
 */
private ArrayList<KVOperation> parseBackupStream(ParcelFileDescriptor data)
        throws IOException {
    final ArrayList<KVOperation> ops = new ArrayList<>();
    final BackupDataInput stream = new BackupDataInput(data.getFileDescriptor());
    while (stream.readNextHeader()) {
        final String key = stream.getKey();
        final String base64Key = new String(Base64.encode(key.getBytes()));
        final int dataSize = stream.getDataSize();
        if (DEBUG) {
            Log.v(TAG, " Delta operation key " + key + " size " + dataSize
                    + " key64 " + base64Key);
        }
        // dataSize < 0 signals deletion; otherwise read the new value.
        byte[] value = null;
        if (dataSize >= 0) {
            value = new byte[dataSize];
            stream.readEntityData(value, 0, dataSize);
        }
        ops.add(new KVOperation(base64Key, value));
    }
    return ops;
}
/**
 * Scans a package's on-disk datastore, recording each key file's size in
 * {@code datastore} and returning the total byte count.
 */
private int parseKeySizes(File packageDir, ArrayMap<String, Integer> datastore) {
    int totalSize = 0;
    final String[] elements = packageDir.list();
    if (elements == null) {
        if (DEBUG) {
            Log.v(TAG, "No existing data for this package");
        }
        return totalSize;
    }
    if (DEBUG) {
        Log.v(TAG, "Existing datastore contents:");
    }
    for (String name : elements) {
        final int size = (int) new File(packageDir, name).length();
        totalSize += size;
        if (DEBUG) {
            Log.v(TAG, " key " + name + " size " + size);
        }
        datastore.put(name, size);
    }
    if (DEBUG) {
        Log.v(TAG, " TOTAL: " + totalSize);
    }
    return totalSize;
}
/** Recursively deletes everything beneath dirname (dirname itself is kept). */
private void deleteContents(File dirname) {
    final File[] entries = dirname.listFiles();
    if (entries == null) {
        return;
    }
    for (File entry : entries) {
        // Empty a subdirectory before deleting it.
        if (entry.isDirectory()) {
            deleteContents(entry);
        }
        entry.delete();
    }
}
/**
 * Removes all stored data for one package from the current set: both its
 * key/value record files and any full-data tarballs.
 */
@Override
public int clearBackupData(PackageInfo packageInfo) {
    if (DEBUG) Log.v(TAG, "clearBackupData() pkg=" + packageInfo.packageName);
    final File[] packageDirs = {
            new File(mCurrentSetIncrementalDir, packageInfo.packageName),
            new File(mCurrentSetFullDir, packageInfo.packageName),
    };
    for (File dir : packageDirs) {
        final File[] entries = dir.listFiles();
        if (entries != null) {
            for (File entry : entries) {
                entry.delete();
            }
            dir.delete();
        }
    }
    return TRANSPORT_OK;
}
/** Ends the current backup pass, tearing down any full-backup streams. */
@Override
public int finishBackup() {
if (DEBUG) Log.v(TAG, "finishBackup() of " + mFullTargetPackage);
return tearDownFullBackup();
}
/**
 * Closes out any in-flight full-backup state: flushes and closes the
 * tarball stream, then closes our dup of the socket. Returns TRANSPORT_OK,
 * or TRANSPORT_ERROR if closing failed (refs are cleared either way).
 */
private int tearDownFullBackup() {
if (mSocket != null) {
try {
if (mFullBackupOutputStream != null) {
mFullBackupOutputStream.flush();
mFullBackupOutputStream.close();
}
mSocketInputStream = null;
mFullTargetPackage = null;
mSocket.close();
} catch (IOException e) {
if (DEBUG) {
Log.w(TAG, "Exception caught in tearDownFullBackup()", e);
}
return TRANSPORT_ERROR;
} finally {
// Always drop the socket/stream refs so a later backup can start.
mSocket = null;
mFullBackupOutputStream = null;
}
}
return TRANSPORT_OK;
}
/** Location of the full-data tarball for a package in the current set. */
private File tarballFile(String pkgName) {
return new File(mCurrentSetFullDir, pkgName);
}
/** Full backups may run immediately; no backoff requested. */
@Override
public long requestFullBackupTime() {
return 0;
}
/**
 * Accepts or declines a proposed full-backup payload: rejects empty
 * payloads outright and anything over the debug full-data quota.
 */
@Override
public int checkFullBackupSize(long size) {
    final int verdict;
    if (size <= 0) {
        verdict = TRANSPORT_PACKAGE_REJECTED;
    } else if (size > FULL_BACKUP_SIZE_QUOTA) {
        verdict = TRANSPORT_QUOTA_EXCEEDED;
    } else {
        verdict = TRANSPORT_OK;
    }
    if (verdict != TRANSPORT_OK && DEBUG) {
        Log.v(TAG, "Declining backup of size " + size);
    }
    return verdict;
}
/**
 * Begins a full-data backup for one package: dups the socket fd so we own
 * an independent descriptor and prepares the read buffer. Actual bytes
 * arrive via sendBackupData(). Only one full backup may be in flight.
 */
@Override
public int performFullBackup(PackageInfo targetPackage, ParcelFileDescriptor socket) {
if (mSocket != null) {
Log.e(TAG, "Attempt to initiate full backup while one is in progress");
return TRANSPORT_ERROR;
}
if (DEBUG) {
Log.i(TAG, "performFullBackup : " + targetPackage);
}
try {
mFullBackupSize = 0;
// Dup so closing our copy cannot disturb the caller's descriptor.
mSocket = ParcelFileDescriptor.dup(socket.getFileDescriptor());
mSocketInputStream = new FileInputStream(mSocket.getFileDescriptor());
} catch (IOException e) {
Log.e(TAG, "Unable to process socket for full backup");
return TRANSPORT_ERROR;
}
mFullTargetPackage = targetPackage.packageName;
mFullBackupBuffer = new byte[4096];
return TRANSPORT_OK;
}
/**
 * Consumes {@code numBytes} of full-backup data from the socket and
 * appends it to the package's tarball, enforcing the full-data quota.
 * The tarball output stream is opened lazily on the first chunk.
 */
@Override
public int sendBackupData(final int numBytes) {
if (mSocket == null) {
Log.w(TAG, "Attempted sendBackupData before performFullBackup");
return TRANSPORT_ERROR;
}
mFullBackupSize += numBytes;
if (mFullBackupSize > FULL_BACKUP_SIZE_QUOTA) {
return TRANSPORT_QUOTA_EXCEEDED;
}
// Grow the staging buffer if this chunk is larger than any seen so far.
if (numBytes > mFullBackupBuffer.length) {
mFullBackupBuffer = new byte[numBytes];
}
if (mFullBackupOutputStream == null) {
FileOutputStream tarstream;
try {
File tarball = tarballFile(mFullTargetPackage);
tarstream = new FileOutputStream(tarball);
} catch (FileNotFoundException e) {
return TRANSPORT_ERROR;
}
mFullBackupOutputStream = new BufferedOutputStream(tarstream);
}
// The socket may deliver the chunk across several reads; copy until all
// numBytes have been transferred.
int bytesLeft = numBytes;
while (bytesLeft > 0) {
try {
int nRead = mSocketInputStream.read(mFullBackupBuffer, 0, bytesLeft);
if (nRead < 0) {
// Early EOF from the agent: the stream is incomplete.
Log.w(TAG, "Unexpected EOD; failing backup");
return TRANSPORT_ERROR;
}
mFullBackupOutputStream.write(mFullBackupBuffer, 0, nRead);
bytesLeft -= nRead;
} catch (IOException e) {
Log.e(TAG, "Error handling backup data for " + mFullTargetPackage);
return TRANSPORT_ERROR;
}
}
if (DEBUG) {
Log.v(TAG, " stored " + numBytes + " of data");
}
return TRANSPORT_OK;
}
/**
 * Cancels the in-flight full backup: closes the streams and deletes the
 * partially written tarball so no truncated archive is left behind.
 */
@Override
public void cancelFullBackup() {
if (DEBUG) {
Log.i(TAG, "Canceling full backup of " + mFullTargetPackage);
}
// Capture the tarball path before teardown nulls mFullTargetPackage.
File archive = tarballFile(mFullTargetPackage);
tearDownFullBackup();
if (archive.exists()) {
archive.delete();
}
}
// Tokens of optional, pre-populated restore sets that may exist on disk
// alongside the live set (CURRENT_SET_TOKEN).
static final long[] POSSIBLE_SETS = { 2, 3, 4, 5, 6, 7, 8, 9 };
/**
 * Reports every backup set present under the data directory; the current
 * set is always included (listed last).
 */
@Override
public RestoreSet[] getAvailableRestoreSets() {
long[] existing = new long[POSSIBLE_SETS.length + 1];
int num = 0;
// Probe which of the optional set directories actually exist.
for (long token : POSSIBLE_SETS) {
if ((new File(mDataDir, Long.toString(token))).exists()) {
existing[num++] = token;
}
}
// The current set is always available.
existing[num++] = CURRENT_SET_TOKEN;
RestoreSet[] available = new RestoreSet[num];
for (int i = 0; i < available.length; i++) {
available[i] = new RestoreSet("Local disk image", "flash", existing[i]);
}
return available;
}
/** The live backup set doubles as the current restore set. */
@Override
public long getCurrentRestoreSet() {
return CURRENT_SET_TOKEN;
}
/**
 * Begins a restore pass from the set identified by {@code token} over the
 * given candidate packages; resets the cursor used by nextRestorePackage().
 */
@Override
public int startRestore(long token, PackageInfo[] packages) {
if (DEBUG) Log.v(TAG, "start restore " + token + " : " + packages.length
+ " matching packages");
mRestorePackages = packages;
mRestorePackage = -1;
mRestoreSetDir = new File(mDataDir, Long.toString(token));
mRestoreSetIncrementalDir = new File(mRestoreSetDir, INCREMENTAL_DIR);
mRestoreSetFullDir = new File(mRestoreSetDir, FULL_DATA_DIR);
return TRANSPORT_OK;
}
/**
 * Advances to the next candidate package that actually has data in the
 * restore set, describing it as key/value or full-stream. Returns
 * NO_MORE_PACKAGES once the candidate list is exhausted.
 *
 * Fix: the startRestore-state check now runs before the debug log, which
 * previously dereferenced mRestorePackages and could throw a
 * NullPointerException instead of the intended IllegalStateException.
 */
@Override
public RestoreDescription nextRestorePackage() {
    if (mRestorePackages == null) throw new IllegalStateException("startRestore not called");
    if (DEBUG) {
        Log.v(TAG, "nextRestorePackage() : mRestorePackage=" + mRestorePackage
                + " length=" + mRestorePackages.length);
    }
    boolean found = false;
    while (++mRestorePackage < mRestorePackages.length) {
        String name = mRestorePackages[mRestorePackage].packageName;
        // Key/value data takes precedence over a full-data tarball.
        String[] contents = (new File(mRestoreSetIncrementalDir, name)).list();
        if (contents != null && contents.length > 0) {
            if (DEBUG) {
                Log.v(TAG, " nextRestorePackage(TYPE_KEY_VALUE) @ "
                        + mRestorePackage + " = " + name);
            }
            mRestoreType = RestoreDescription.TYPE_KEY_VALUE;
            found = true;
        }
        if (!found) {
            File maybeFullData = new File(mRestoreSetFullDir, name);
            if (maybeFullData.length() > 0) {
                if (DEBUG) {
                    Log.v(TAG, " nextRestorePackage(TYPE_FULL_STREAM) @ "
                            + mRestorePackage + " = " + name);
                }
                mRestoreType = RestoreDescription.TYPE_FULL_STREAM;
                // Force getNextFullRestoreDataChunk() to reopen the archive.
                mCurFullRestoreStream = null;
                found = true;
            }
        }
        if (found) {
            return new RestoreDescription(name, mRestoreType);
        }
        if (DEBUG) {
            Log.v(TAG, " ... package @ " + mRestorePackage + " = " + name
                    + " has no data; skipping");
        }
    }
    if (DEBUG) Log.v(TAG, " no more packages to restore");
    return RestoreDescription.NO_MORE_PACKAGES;
}
/**
 * Writes the key/value records of the current restore package into the
 * provided pipe as entity header/data pairs, keys in decoded sort order.
 *
 * Fix: each record file is now read in a loop until the buffer is full —
 * a single InputStream.read() call may legally return fewer bytes than
 * requested, which previously could restore a truncated value. The stream
 * is also managed with try-with-resources.
 */
@Override
public int getRestoreData(ParcelFileDescriptor outFd) {
    if (mRestorePackages == null) throw new IllegalStateException("startRestore not called");
    if (mRestorePackage < 0) throw new IllegalStateException("nextRestorePackage not called");
    if (mRestoreType != RestoreDescription.TYPE_KEY_VALUE) {
        throw new IllegalStateException("getRestoreData(fd) for non-key/value dataset");
    }
    File packageDir = new File(mRestoreSetIncrementalDir,
            mRestorePackages[mRestorePackage].packageName);
    ArrayList<DecodedFilename> blobs = contentsByKey(packageDir);
    if (blobs == null) {
        Log.e(TAG, "No keys for package: " + packageDir);
        return TRANSPORT_ERROR;
    }
    if (DEBUG) Log.v(TAG, " getRestoreData() found " + blobs.size() + " key files");
    BackupDataOutput out = new BackupDataOutput(outFd.getFileDescriptor());
    try {
        for (DecodedFilename keyEntry : blobs) {
            File f = keyEntry.file;
            try (FileInputStream in = new FileInputStream(f)) {
                int size = (int) f.length();
                byte[] buf = new byte[size];
                // read() may deliver fewer bytes than requested; keep reading
                // until the whole record is in memory.
                int pos = 0;
                while (pos < size) {
                    int nRead = in.read(buf, pos, size - pos);
                    if (nRead < 0) {
                        throw new IOException("Unexpected EOF reading " + f);
                    }
                    pos += nRead;
                }
                if (DEBUG) Log.v(TAG, " ... key=" + keyEntry.key + " size=" + size);
                out.writeEntityHeader(keyEntry.key, size);
                out.writeEntityData(buf, size);
            }
        }
        return TRANSPORT_OK;
    } catch (IOException e) {
        Log.e(TAG, "Unable to read backup records", e);
        return TRANSPORT_ERROR;
    }
}
/**
 * Pairs a datastore file with its decoded key (filenames are the base64
 * encodings of backup keys); sorts by the decoded key so restore emits
 * records in key order.
 */
static class DecodedFilename implements Comparable<DecodedFilename> {
public File file;
public String key;
public DecodedFilename(File f) {
file = f;
// The filename is the base64 encoding of the original backup key.
key = new String(Base64.decode(f.getName()));
}
@Override
public int compareTo(DecodedFilename other) {
// Order by decoded key, not the on-disk filename.
return key.compareTo(other.key);
}
}
/**
 * Lists the key files in {@code dir} sorted by decoded key name, or null
 * when the directory is missing or empty.
 */
private ArrayList<DecodedFilename> contentsByKey(File dir) {
    final File[] files = dir.listFiles();
    if (files == null || files.length == 0) {
        return null;
    }
    final ArrayList<DecodedFilename> decoded = new ArrayList<DecodedFilename>();
    for (File file : files) {
        decoded.add(new DecodedFilename(file));
    }
    Collections.sort(decoded);
    return decoded;
}
/** Ends the restore pass, releasing any full-stream restore state. */
@Override
public void finishRestore() {
if (DEBUG) Log.v(TAG, "finishRestore()");
if (mRestoreType == RestoreDescription.TYPE_FULL_STREAM) {
resetFullRestoreState();
}
// Clear so stale type state cannot leak into the next operation.
mRestoreType = 0;
}
/** Drops all full-restore streaming state, closing the archive stream. */
private void resetFullRestoreState() {
IoUtils.closeQuietly(mCurFullRestoreStream);
mCurFullRestoreStream = null;
mFullRestoreSocketStream = null;
mFullRestoreBuffer = null;
}
/**
 * Streams the next chunk of a full-data restore to the given socket,
 * lazily opening the package's tarball on the first call. Returns the
 * number of bytes delivered, NO_MORE_DATA at EOF, TRANSPORT_PACKAGE_REJECTED
 * if the archive cannot be opened, or TRANSPORT_ERROR on I/O failure.
 *
 * Cleanup: removed the dead, empty finally block; stream teardown is the
 * job of resetFullRestoreState(), reached via finishRestore() or
 * abortFullRestore().
 */
@Override
public int getNextFullRestoreDataChunk(ParcelFileDescriptor socket) {
    if (mRestoreType != RestoreDescription.TYPE_FULL_STREAM) {
        throw new IllegalStateException("Asked for full restore data for non-stream package");
    }
    // First chunk for this package: open its archive and set up buffers.
    if (mCurFullRestoreStream == null) {
        final String name = mRestorePackages[mRestorePackage].packageName;
        if (DEBUG) Log.i(TAG, "Starting full restore of " + name);
        File dataset = new File(mRestoreSetFullDir, name);
        try {
            mCurFullRestoreStream = new FileInputStream(dataset);
        } catch (IOException e) {
            Log.e(TAG, "Unable to read archive for " + name);
            return TRANSPORT_PACKAGE_REJECTED;
        }
        mFullRestoreSocketStream = new FileOutputStream(socket.getFileDescriptor());
        mFullRestoreBuffer = new byte[2*1024];
    }
    int nRead;
    try {
        nRead = mCurFullRestoreStream.read(mFullRestoreBuffer);
        if (nRead < 0) {
            nRead = NO_MORE_DATA;
        } else if (nRead == 0) {
            // Shouldn't happen for a regular file; treat defensively as EOF.
            Log.w(TAG, "read() of archive file returned 0; treating as EOF");
            nRead = NO_MORE_DATA;
        } else {
            if (DEBUG) {
                Log.i(TAG, " delivering restore chunk: " + nRead);
            }
            mFullRestoreSocketStream.write(mFullRestoreBuffer, 0, nRead);
        }
    } catch (IOException e) {
        return TRANSPORT_ERROR;
    }
    return nRead;
}
/** Aborts an in-progress full-stream restore and clears its state. */
@Override
public int abortFullRestore() {
if (mRestoreType != RestoreDescription.TYPE_FULL_STREAM) {
throw new IllegalStateException("abortFullRestore() but not currently restoring");
}
resetFullRestoreState();
mRestoreType = 0;
return TRANSPORT_OK;
}
/** Fixed quotas for this debug transport, regardless of package. */
@Override
public long getBackupQuota(String packageName, boolean isFullBackup) {
    if (isFullBackup) {
        return FULL_BACKUP_SIZE_QUOTA;
    }
    return KEY_VALUE_BACKUP_SIZE_QUOTA;
}
}