Diffstat (limited to 'src')
-rw-r--r--  src/android/net/NetworkStackIpMemoryStore.java                                |  42
-rw-r--r--  src/android/net/ip/IpClient.java                                              |  12
-rw-r--r--  src/com/android/server/NetworkStackService.java                               |  32
-rw-r--r--  src/com/android/server/connectivity/ipmemorystore/IpMemoryStoreDatabase.java  | 515
-rw-r--r--  src/com/android/server/connectivity/ipmemorystore/IpMemoryStoreService.java   | 397
-rw-r--r--  src/com/android/server/connectivity/ipmemorystore/RelevanceUtils.java         | 307
-rw-r--r--  src/com/android/server/connectivity/ipmemorystore/Utils.java                  |  52
7 files changed, 1352 insertions(+), 5 deletions(-)
diff --git a/src/android/net/NetworkStackIpMemoryStore.java b/src/android/net/NetworkStackIpMemoryStore.java
new file mode 100644
index 0000000..475f826
--- /dev/null
+++ b/src/android/net/NetworkStackIpMemoryStore.java
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.net;
+
+import android.annotation.NonNull;
+import android.content.Context;
+
+/**
+ * Service used to communicate with the IP memory store service in the network stack,
+ * which runs in the same module.
+ * @see com.android.server.connectivity.ipmemorystore.IpMemoryStoreService
+ * @hide
+ */
+public class NetworkStackIpMemoryStore extends IpMemoryStoreClient {
+ @NonNull private final IIpMemoryStore mService;
+
+ public NetworkStackIpMemoryStore(@NonNull final Context context,
+ @NonNull final IIpMemoryStore service) {
+ super(context);
+ mService = service;
+ }
+
+ @Override
+ @NonNull
+ protected IIpMemoryStore getService() {
+ return mService;
+ }
+}
diff --git a/src/android/net/ip/IpClient.java b/src/android/net/ip/IpClient.java
index 7c7cdbd..b68fe23 100644
--- a/src/android/net/ip/IpClient.java
+++ b/src/android/net/ip/IpClient.java
@@ -29,6 +29,7 @@ import android.net.INetd;
import android.net.IpPrefix;
import android.net.LinkAddress;
import android.net.LinkProperties;
+import android.net.NetworkStackIpMemoryStore;
import android.net.ProvisioningConfigurationParcelable;
import android.net.ProxyInfo;
import android.net.RouteInfo;
@@ -61,6 +62,7 @@ import com.android.internal.util.State;
import com.android.internal.util.StateMachine;
import com.android.internal.util.WakeupMessage;
import com.android.server.NetworkObserverRegistry;
+import com.android.server.NetworkStackService.NetworkStackServiceManager;
import java.io.FileDescriptor;
import java.io.PrintWriter;
@@ -100,6 +102,7 @@ public class IpClient extends StateMachine {
// One holds StateMachine logs and the other connectivity packet logs.
private static final ConcurrentHashMap<String, SharedLog> sSmLogs = new ConcurrentHashMap<>();
private static final ConcurrentHashMap<String, LocalLog> sPktLogs = new ConcurrentHashMap<>();
+ private final NetworkStackIpMemoryStore mIpMemoryStore;
/**
* Dump all state machine and connectivity packet logs to the specified writer.
@@ -388,13 +391,14 @@ public class IpClient extends StateMachine {
}
public IpClient(Context context, String ifName, IIpClientCallbacks callback,
- NetworkObserverRegistry observerRegistry) {
- this(context, ifName, callback, observerRegistry, new Dependencies());
+ NetworkObserverRegistry observerRegistry, NetworkStackServiceManager nssManager) {
+ this(context, ifName, callback, observerRegistry, nssManager, new Dependencies());
}
@VisibleForTesting
IpClient(Context context, String ifName, IIpClientCallbacks callback,
- NetworkObserverRegistry observerRegistry, Dependencies deps) {
+ NetworkObserverRegistry observerRegistry, NetworkStackServiceManager nssManager,
+ Dependencies deps) {
super(IpClient.class.getSimpleName() + "." + ifName);
Preconditions.checkNotNull(ifName);
Preconditions.checkNotNull(callback);
@@ -408,6 +412,8 @@ public class IpClient extends StateMachine {
mShutdownLatch = new CountDownLatch(1);
mCm = mContext.getSystemService(ConnectivityManager.class);
mObserverRegistry = observerRegistry;
+ mIpMemoryStore =
+ new NetworkStackIpMemoryStore(context, nssManager.getIpMemoryStoreService());
sSmLogs.putIfAbsent(mInterfaceName, new SharedLog(MAX_LOG_RECORDS, mTag));
mLog = sSmLogs.get(mInterfaceName);
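
IpClient now receives a NetworkStackServiceManager in addition to the NetworkObserverRegistry, so that it can build its NetworkStackIpMemoryStore. A minimal sketch of both call sites follows; this is an illustration only, where context, callbacks, observerRegistry and nssManager are placeholder variables and the test path assumes a Mockito-style mock:

    // Production path, mirroring NetworkStackService#makeIpClient() further down in this change.
    final IpClient ipClient =
            new IpClient(context, "wlan0", callbacks, observerRegistry, nssManager);

    // Test path through the @VisibleForTesting constructor. NetworkStackServiceManager has a
    // single method, so a lambda returning a mocked IIpMemoryStore is enough to stub it out.
    final NetworkStackServiceManager testManager = () -> mock(IIpMemoryStore.class);
    final IpClient testIpClient = new IpClient(context, "wlan0", callbacks, observerRegistry,
            testManager, new Dependencies());
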
diff --git a/src/com/android/server/NetworkStackService.java b/src/com/android/server/NetworkStackService.java
index e7c8e85..335d951 100644
--- a/src/com/android/server/NetworkStackService.java
+++ b/src/com/android/server/NetworkStackService.java
@@ -29,6 +29,8 @@ import android.app.Service;
import android.content.Context;
import android.content.Intent;
import android.net.ConnectivityManager;
+import android.net.IIpMemoryStore;
+import android.net.IIpMemoryStoreCallbacks;
import android.net.INetd;
import android.net.INetworkMonitor;
import android.net.INetworkMonitorCallbacks;
@@ -49,6 +51,7 @@ import android.os.RemoteException;
import com.android.internal.annotations.GuardedBy;
import com.android.internal.util.IndentingPrintWriter;
import com.android.server.connectivity.NetworkMonitor;
+import com.android.server.connectivity.ipmemorystore.IpMemoryStoreService;
import java.io.FileDescriptor;
import java.io.PrintWriter;
@@ -86,7 +89,19 @@ public class NetworkStackService extends Service {
return makeConnector(this);
}
- private static class NetworkStackConnector extends INetworkStackConnector.Stub {
+ /**
+     * An interface for internal clients of the network stack service, used to obtain
+     * (or create on demand) instances of the services it manages.
+ */
+ public interface NetworkStackServiceManager {
+ /**
+ * Get an instance of the IpMemoryStoreService.
+ */
+ IIpMemoryStore getIpMemoryStoreService();
+ }
+
+ private static class NetworkStackConnector extends INetworkStackConnector.Stub
+ implements NetworkStackServiceManager {
private static final int NUM_VALIDATION_LOG_LINES = 20;
private final Context mContext;
private final INetd mNetd;
@@ -94,6 +109,7 @@ public class NetworkStackService extends Service {
private final ConnectivityManager mCm;
@GuardedBy("mIpClients")
private final ArrayList<WeakReference<IpClient>> mIpClients = new ArrayList<>();
+ private final IpMemoryStoreService mIpMemoryStoreService;
private static final int MAX_VALIDATION_LOGS = 10;
@GuardedBy("mValidationLogs")
@@ -116,6 +132,7 @@ public class NetworkStackService extends Service {
(IBinder) context.getSystemService(Context.NETD_SERVICE));
mObserverRegistry = new NetworkObserverRegistry();
mCm = context.getSystemService(ConnectivityManager.class);
+ mIpMemoryStoreService = new IpMemoryStoreService(context);
try {
mObserverRegistry.register(mNetd);
@@ -159,7 +176,7 @@ public class NetworkStackService extends Service {
@Override
public void makeIpClient(String ifName, IIpClientCallbacks cb) throws RemoteException {
- final IpClient ipClient = new IpClient(mContext, ifName, cb, mObserverRegistry);
+ final IpClient ipClient = new IpClient(mContext, ifName, cb, mObserverRegistry, this);
synchronized (mIpClients) {
final Iterator<WeakReference<IpClient>> it = mIpClients.iterator();
@@ -176,6 +193,17 @@ public class NetworkStackService extends Service {
}
@Override
+ public IIpMemoryStore getIpMemoryStoreService() {
+ return mIpMemoryStoreService;
+ }
+
+ @Override
+ public void fetchIpMemoryStore(@NonNull final IIpMemoryStoreCallbacks cb)
+ throws RemoteException {
+ cb.onIpMemoryStoreFetched(mIpMemoryStoreService);
+ }
+
+ @Override
protected void dump(@NonNull FileDescriptor fd, @NonNull PrintWriter fout,
@Nullable String[] args) {
checkDumpPermission();
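
The NetworkStackConnector now doubles as the NetworkStackServiceManager for in-process clients such as IpClient, while out-of-process clients go through the new fetchIpMemoryStore() entry point and receive the store via a one-way callback. A hedged sketch of the out-of-process side, assuming a caller that already holds an INetworkStackConnector binder in a hypothetical local variable named connector (RemoteException handling elided):

    connector.fetchIpMemoryStore(new IIpMemoryStoreCallbacks.Stub() {
        @Override
        public void onIpMemoryStoreFetched(final IIpMemoryStore ipMemoryStore) {
            // The store is now usable; inside the module it could also be wrapped
            // in a NetworkStackIpMemoryStore, as IpClient does.
        }
    });
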
diff --git a/src/com/android/server/connectivity/ipmemorystore/IpMemoryStoreDatabase.java b/src/com/android/server/connectivity/ipmemorystore/IpMemoryStoreDatabase.java
new file mode 100644
index 0000000..4d4ceed
--- /dev/null
+++ b/src/com/android/server/connectivity/ipmemorystore/IpMemoryStoreDatabase.java
@@ -0,0 +1,515 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.server.connectivity.ipmemorystore;
+
+import static android.net.shared.Inet4AddressUtils.inet4AddressToIntHTH;
+import static android.net.shared.Inet4AddressUtils.intToInet4AddressHTH;
+
+import android.annotation.NonNull;
+import android.annotation.Nullable;
+import android.content.ContentValues;
+import android.content.Context;
+import android.database.Cursor;
+import android.database.sqlite.SQLiteCursor;
+import android.database.sqlite.SQLiteCursorDriver;
+import android.database.sqlite.SQLiteDatabase;
+import android.database.sqlite.SQLiteException;
+import android.database.sqlite.SQLiteOpenHelper;
+import android.database.sqlite.SQLiteQuery;
+import android.net.ipmemorystore.NetworkAttributes;
+import android.net.ipmemorystore.Status;
+import android.util.Log;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.net.InetAddress;
+import java.net.UnknownHostException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.StringJoiner;
+
+/**
+ * Encapsulating class for using the SQLite database backing the memory store.
+ *
+ * This class groups together the contracts and the SQLite helper used to
+ * access the database.
+ *
+ * @hide
+ */
+public class IpMemoryStoreDatabase {
+ private static final String TAG = IpMemoryStoreDatabase.class.getSimpleName();
+ // A pair of NetworkAttributes objects is group-close if the confidence that they are
+ // the same is above this cutoff. See NetworkAttributes and SameL3NetworkResponse.
+ private static final float GROUPCLOSE_CONFIDENCE = 0.5f;
+
+ /**
+ * Contract class for the Network Attributes table.
+ */
+ public static class NetworkAttributesContract {
+ public static final String TABLENAME = "NetworkAttributes";
+
+ public static final String COLNAME_L2KEY = "l2Key";
+ public static final String COLTYPE_L2KEY = "TEXT NOT NULL";
+
+ public static final String COLNAME_EXPIRYDATE = "expiryDate";
+ // Milliseconds since the Epoch, in true Java style
+ public static final String COLTYPE_EXPIRYDATE = "BIGINT";
+
+ public static final String COLNAME_ASSIGNEDV4ADDRESS = "assignedV4Address";
+ public static final String COLTYPE_ASSIGNEDV4ADDRESS = "INTEGER";
+
+        // Please note that the group hint is only a *hint*, hence its name. The client can offer
+        // this information to nudge the grouping toward the decision it thinks is right, but it
+        // can't decide for the memory store what constitutes the same L3 network.
+ public static final String COLNAME_GROUPHINT = "groupHint";
+ public static final String COLTYPE_GROUPHINT = "TEXT";
+
+ public static final String COLNAME_DNSADDRESSES = "dnsAddresses";
+ // Stored in marshalled form as is
+ public static final String COLTYPE_DNSADDRESSES = "BLOB";
+
+ public static final String COLNAME_MTU = "mtu";
+ public static final String COLTYPE_MTU = "INTEGER DEFAULT -1";
+
+ public static final String CREATE_TABLE = "CREATE TABLE IF NOT EXISTS "
+ + TABLENAME + " ("
+ + COLNAME_L2KEY + " " + COLTYPE_L2KEY + " PRIMARY KEY NOT NULL, "
+ + COLNAME_EXPIRYDATE + " " + COLTYPE_EXPIRYDATE + ", "
+ + COLNAME_ASSIGNEDV4ADDRESS + " " + COLTYPE_ASSIGNEDV4ADDRESS + ", "
+ + COLNAME_GROUPHINT + " " + COLTYPE_GROUPHINT + ", "
+ + COLNAME_DNSADDRESSES + " " + COLTYPE_DNSADDRESSES + ", "
+ + COLNAME_MTU + " " + COLTYPE_MTU + ")";
+ public static final String DROP_TABLE = "DROP TABLE IF EXISTS " + TABLENAME;
+ }
+
+ /**
+ * Contract class for the Private Data table.
+ */
+ public static class PrivateDataContract {
+ public static final String TABLENAME = "PrivateData";
+
+ public static final String COLNAME_L2KEY = "l2Key";
+ public static final String COLTYPE_L2KEY = "TEXT NOT NULL";
+
+ public static final String COLNAME_CLIENT = "client";
+ public static final String COLTYPE_CLIENT = "TEXT NOT NULL";
+
+ public static final String COLNAME_DATANAME = "dataName";
+ public static final String COLTYPE_DATANAME = "TEXT NOT NULL";
+
+ public static final String COLNAME_DATA = "data";
+ public static final String COLTYPE_DATA = "BLOB NOT NULL";
+
+ public static final String CREATE_TABLE = "CREATE TABLE IF NOT EXISTS "
+ + TABLENAME + " ("
+ + COLNAME_L2KEY + " " + COLTYPE_L2KEY + ", "
+ + COLNAME_CLIENT + " " + COLTYPE_CLIENT + ", "
+ + COLNAME_DATANAME + " " + COLTYPE_DATANAME + ", "
+ + COLNAME_DATA + " " + COLTYPE_DATA + ", "
+ + "PRIMARY KEY ("
+ + COLNAME_L2KEY + ", "
+ + COLNAME_CLIENT + ", "
+ + COLNAME_DATANAME + "))";
+ public static final String DROP_TABLE = "DROP TABLE IF EXISTS " + TABLENAME;
+ }
+
+ // To save memory when the DB is not used, close it after 30s of inactivity. This is
+ // determined manually based on what feels right.
+ private static final long IDLE_CONNECTION_TIMEOUT_MS = 30_000;
+
+ /** The SQLite DB helper */
+ public static class DbHelper extends SQLiteOpenHelper {
+ // Update this whenever changing the schema.
+ private static final int SCHEMA_VERSION = 2;
+ private static final String DATABASE_FILENAME = "IpMemoryStore.db";
+
+ public DbHelper(@NonNull final Context context) {
+ super(context, DATABASE_FILENAME, null, SCHEMA_VERSION);
+ setIdleConnectionTimeout(IDLE_CONNECTION_TIMEOUT_MS);
+ }
+
+ /** Called when the database is created */
+ @Override
+ public void onCreate(@NonNull final SQLiteDatabase db) {
+ db.execSQL(NetworkAttributesContract.CREATE_TABLE);
+ db.execSQL(PrivateDataContract.CREATE_TABLE);
+ }
+
+ /** Called when the database is upgraded */
+ @Override
+ public void onUpgrade(@NonNull final SQLiteDatabase db, final int oldVersion,
+ final int newVersion) {
+ // No upgrade supported yet.
+ db.execSQL(NetworkAttributesContract.DROP_TABLE);
+ db.execSQL(PrivateDataContract.DROP_TABLE);
+ onCreate(db);
+ }
+
+ /** Called when the database is downgraded */
+ @Override
+ public void onDowngrade(@NonNull final SQLiteDatabase db, final int oldVersion,
+ final int newVersion) {
+            // Downgrades always nuke all data and recreate empty tables.
+ db.execSQL(NetworkAttributesContract.DROP_TABLE);
+ db.execSQL(PrivateDataContract.DROP_TABLE);
+ onCreate(db);
+ }
+ }
+
+ @NonNull
+ private static byte[] encodeAddressList(@NonNull final List<InetAddress> addresses) {
+ final ByteArrayOutputStream os = new ByteArrayOutputStream();
+ for (final InetAddress address : addresses) {
+ final byte[] b = address.getAddress();
+ os.write(b.length);
+ os.write(b, 0, b.length);
+ }
+ return os.toByteArray();
+ }
+
+ @NonNull
+ private static ArrayList<InetAddress> decodeAddressList(@NonNull final byte[] encoded) {
+ final ByteArrayInputStream is = new ByteArrayInputStream(encoded);
+ final ArrayList<InetAddress> addresses = new ArrayList<>();
+ int d = -1;
+ while ((d = is.read()) != -1) {
+ final byte[] bytes = new byte[d];
+ is.read(bytes, 0, d);
+ try {
+ addresses.add(InetAddress.getByAddress(bytes));
+ } catch (UnknownHostException e) { /* Hopefully impossible */ }
+ }
+ return addresses;
+ }
+
+ @NonNull
+ private static ContentValues toContentValues(@Nullable final NetworkAttributes attributes) {
+ final ContentValues values = new ContentValues();
+ if (null == attributes) return values;
+ if (null != attributes.assignedV4Address) {
+ values.put(NetworkAttributesContract.COLNAME_ASSIGNEDV4ADDRESS,
+ inet4AddressToIntHTH(attributes.assignedV4Address));
+ }
+ if (null != attributes.groupHint) {
+ values.put(NetworkAttributesContract.COLNAME_GROUPHINT, attributes.groupHint);
+ }
+ if (null != attributes.dnsAddresses) {
+ values.put(NetworkAttributesContract.COLNAME_DNSADDRESSES,
+ encodeAddressList(attributes.dnsAddresses));
+ }
+ if (null != attributes.mtu) {
+ values.put(NetworkAttributesContract.COLNAME_MTU, attributes.mtu);
+ }
+ return values;
+ }
+
+ // Convert a NetworkAttributes object to content values to store them in a table compliant
+ // with the contract defined in NetworkAttributesContract.
+ @NonNull
+ private static ContentValues toContentValues(@NonNull final String key,
+ @Nullable final NetworkAttributes attributes, final long expiry) {
+ final ContentValues values = toContentValues(attributes);
+ values.put(NetworkAttributesContract.COLNAME_L2KEY, key);
+ values.put(NetworkAttributesContract.COLNAME_EXPIRYDATE, expiry);
+ return values;
+ }
+
+ // Convert a byte array into content values to store it in a table compliant with the
+ // contract defined in PrivateDataContract.
+ @NonNull
+ private static ContentValues toContentValues(@NonNull final String key,
+ @NonNull final String clientId, @NonNull final String name,
+ @NonNull final byte[] data) {
+ final ContentValues values = new ContentValues();
+ values.put(PrivateDataContract.COLNAME_L2KEY, key);
+ values.put(PrivateDataContract.COLNAME_CLIENT, clientId);
+ values.put(PrivateDataContract.COLNAME_DATANAME, name);
+ values.put(PrivateDataContract.COLNAME_DATA, data);
+ return values;
+ }
+
+ @Nullable
+ private static NetworkAttributes readNetworkAttributesLine(@NonNull final Cursor cursor) {
+ // Make sure the data hasn't expired
+ final long expiry = getLong(cursor, NetworkAttributesContract.COLNAME_EXPIRYDATE, -1L);
+ if (expiry < System.currentTimeMillis()) return null;
+
+ final NetworkAttributes.Builder builder = new NetworkAttributes.Builder();
+ final int assignedV4AddressInt = getInt(cursor,
+ NetworkAttributesContract.COLNAME_ASSIGNEDV4ADDRESS, 0);
+ final String groupHint = getString(cursor, NetworkAttributesContract.COLNAME_GROUPHINT);
+ final byte[] dnsAddressesBlob =
+ getBlob(cursor, NetworkAttributesContract.COLNAME_DNSADDRESSES);
+ final int mtu = getInt(cursor, NetworkAttributesContract.COLNAME_MTU, -1);
+ if (0 != assignedV4AddressInt) {
+ builder.setAssignedV4Address(intToInet4AddressHTH(assignedV4AddressInt));
+ }
+ builder.setGroupHint(groupHint);
+ if (null != dnsAddressesBlob) {
+ builder.setDnsAddresses(decodeAddressList(dnsAddressesBlob));
+ }
+ if (mtu >= 0) {
+ builder.setMtu(mtu);
+ }
+ return builder.build();
+ }
+
+ private static final String[] EXPIRY_COLUMN = new String[] {
+ NetworkAttributesContract.COLNAME_EXPIRYDATE
+ };
+ static final int EXPIRY_ERROR = -1; // Legal values for expiry are positive
+
+ static final String SELECT_L2KEY = NetworkAttributesContract.COLNAME_L2KEY + " = ?";
+
+ // Returns the expiry date of the specified row, or one of the error codes above if the
+    // row is not found or if some other error occurred.
+ static long getExpiry(@NonNull final SQLiteDatabase db, @NonNull final String key) {
+ final Cursor cursor = db.query(NetworkAttributesContract.TABLENAME,
+ EXPIRY_COLUMN, // columns
+ SELECT_L2KEY, // selection
+ new String[] { key }, // selectionArgs
+ null, // groupBy
+ null, // having
+ null // orderBy
+ );
+        // L2KEY is the primary key; it should not be possible to get more than one
+        // result here. 0 results means the key was not found.
+        if (cursor.getCount() != 1) {
+            cursor.close();
+            return EXPIRY_ERROR;
+        }
+ cursor.moveToFirst();
+ final long result = cursor.getLong(0); // index in the EXPIRY_COLUMN array
+ cursor.close();
+ return result;
+ }
+
+ static final int RELEVANCE_ERROR = -1; // Legal values for relevance are positive
+
+ // Returns the relevance of the specified row, or one of the error codes above if the
+    // row is not found or if some other error occurred.
+ static int getRelevance(@NonNull final SQLiteDatabase db, @NonNull final String key) {
+ final long expiry = getExpiry(db, key);
+ return expiry < 0 ? (int) expiry : RelevanceUtils.computeRelevanceForNow(expiry);
+ }
+
+ // If the attributes are null, this will only write the expiry.
+ // Returns an int out of Status.{SUCCESS,ERROR_*}
+ static int storeNetworkAttributes(@NonNull final SQLiteDatabase db, @NonNull final String key,
+ final long expiry, @Nullable final NetworkAttributes attributes) {
+ final ContentValues cv = toContentValues(key, attributes, expiry);
+ db.beginTransaction();
+ try {
+            // Unfortunately SQLite does not have a way to do INSERT OR UPDATE. The options are
+            // either to insert with ON CONFLICT IGNORE and then update (as done here), or to
+            // construct a custom SQL INSERT statement with a nested SELECT.
+ final long resultId = db.insertWithOnConflict(NetworkAttributesContract.TABLENAME,
+ null, cv, SQLiteDatabase.CONFLICT_IGNORE);
+ if (resultId < 0) {
+ db.update(NetworkAttributesContract.TABLENAME, cv, SELECT_L2KEY, new String[]{key});
+ }
+ db.setTransactionSuccessful();
+ return Status.SUCCESS;
+ } catch (SQLiteException e) {
+ // No space left on disk or something
+ Log.e(TAG, "Could not write to the memory store", e);
+ } finally {
+ db.endTransaction();
+ }
+ return Status.ERROR_STORAGE;
+ }
+
+ // Returns an int out of Status.{SUCCESS,ERROR_*}
+ static int storeBlob(@NonNull final SQLiteDatabase db, @NonNull final String key,
+ @NonNull final String clientId, @NonNull final String name,
+ @NonNull final byte[] data) {
+ final long res = db.insertWithOnConflict(PrivateDataContract.TABLENAME, null,
+ toContentValues(key, clientId, name, data), SQLiteDatabase.CONFLICT_REPLACE);
+ return (res == -1) ? Status.ERROR_STORAGE : Status.SUCCESS;
+ }
+
+ @Nullable
+ static NetworkAttributes retrieveNetworkAttributes(@NonNull final SQLiteDatabase db,
+ @NonNull final String key) {
+ final Cursor cursor = db.query(NetworkAttributesContract.TABLENAME,
+ null, // columns, null means everything
+ NetworkAttributesContract.COLNAME_L2KEY + " = ?", // selection
+ new String[] { key }, // selectionArgs
+ null, // groupBy
+ null, // having
+ null); // orderBy
+        // L2KEY is the primary key; it should not be possible to get more than one
+        // result here. 0 results means the key was not found.
+        if (cursor.getCount() != 1) {
+            cursor.close();
+            return null;
+        }
+ cursor.moveToFirst();
+ final NetworkAttributes attributes = readNetworkAttributesLine(cursor);
+ cursor.close();
+ return attributes;
+ }
+
+ private static final String[] DATA_COLUMN = new String[] {
+ PrivateDataContract.COLNAME_DATA
+ };
+ @Nullable
+ static byte[] retrieveBlob(@NonNull final SQLiteDatabase db, @NonNull final String key,
+ @NonNull final String clientId, @NonNull final String name) {
+ final Cursor cursor = db.query(PrivateDataContract.TABLENAME,
+ DATA_COLUMN, // columns
+ PrivateDataContract.COLNAME_L2KEY + " = ? AND " // selection
+ + PrivateDataContract.COLNAME_CLIENT + " = ? AND "
+ + PrivateDataContract.COLNAME_DATANAME + " = ?",
+ new String[] { key, clientId, name }, // selectionArgs
+ null, // groupBy
+ null, // having
+ null); // orderBy
+ // The query above is querying by (composite) primary key, so it should not be possible to
+ // get more than one result here. 0 results means the key was not found.
+        if (cursor.getCount() != 1) {
+            cursor.close();
+            return null;
+        }
+ cursor.moveToFirst();
+ final byte[] result = cursor.getBlob(0); // index in the DATA_COLUMN array
+ cursor.close();
+ return result;
+ }
+
+ /**
+     * The following is a horrible hack that is necessary because the Android SQLite API does not
+     * have a way to query for a binary blob. This is almost certainly an oversight.
+     *
+     * The Android SQLite API has two families of methods: one for queries that return data, and
+     * one for more general SQL statements that can execute anything but may not return
+     * anything. All the query methods, however, only take String[] for the arguments.
+     *
+     * In principle it is simple to write a function that encodes a binary blob in the
+     * way SQLite expects it. However, because the API forces the argument to be coerced into a
+     * String, the SQLiteQuery object generated by the default query methods will bind all
+     * arguments as Strings and SQLite will *sanitize* them. This works fine for numeric types,
+     * but the format for blobs is x'<hex string>'. Note the presence of quotes, which will
+     * be sanitized, changing the contents of the field, so the query will fail to match the
+     * blob.
+     *
+     * As far as I can tell, there are two possible ways around this problem. The first one
+     * is to put the data directly in the query string rather than passing it as an argument.
+     * This would require doing the sanitizing by hand. The other is to call bindBlob directly
+     * on the generated SQLiteQuery object, which not only is a lot less dangerous than
+     * hand-rolling the sanitizing, but will also keep doing the right thing if the underlying
+     * format ever changes.
+     *
+     * However, none of the methods that take an SQLiteQuery object can return data; queries
+     * *must* go through SQLiteDatabase#query, and the SQLiteQuery it builds is not accessible
+     * from outside. There is, though, a #query variant that accepts a CursorFactory, and one
+     * is pretty straightforward to implement since all the arguments are passed in and the
+     * SQLiteCursor class is public API.
+     * With this, it's possible to intercept the SQLiteQuery object and, as the args
+     * are available, to bind them directly and work around the API's oblivious coercion into
+     * Strings.
+     *
+     * This is really sad, but I don't see another way of making this work other than the
+     * hand-rolled sanitizing, and this is the lesser evil.
+ */
+ private static class CustomCursorFactory implements SQLiteDatabase.CursorFactory {
+ @NonNull
+ private final ArrayList<Object> mArgs;
+ CustomCursorFactory(@NonNull final ArrayList<Object> args) {
+ mArgs = args;
+ }
+ @Override
+ public Cursor newCursor(final SQLiteDatabase db, final SQLiteCursorDriver masterQuery,
+ final String editTable,
+ final SQLiteQuery query) {
+ int index = 1; // bind is 1-indexed
+ for (final Object arg : mArgs) {
+ if (arg instanceof String) {
+ query.bindString(index++, (String) arg);
+ } else if (arg instanceof Long) {
+ query.bindLong(index++, (Long) arg);
+ } else if (arg instanceof Integer) {
+ query.bindLong(index++, Long.valueOf((Integer) arg));
+ } else if (arg instanceof byte[]) {
+ query.bindBlob(index++, (byte[]) arg);
+ } else {
+ throw new IllegalStateException("Unsupported type CustomCursorFactory "
+ + arg.getClass().toString());
+ }
+ }
+ return new SQLiteCursor(masterQuery, editTable, query);
+ }
+ }
+
+ // Returns the l2key of the closest match, if and only if it matches
+ // closely enough (as determined by group-closeness).
+ @Nullable
+ static String findClosestAttributes(@NonNull final SQLiteDatabase db,
+ @NonNull final NetworkAttributes attr) {
+ if (attr.isEmpty()) return null;
+ final ContentValues values = toContentValues(attr);
+
+        // Build the selection and args. To cut down on the number of lines to search, limit
+        // the search to lines with at least one field equal to the requested attributes.
+        // This works because a line that matches none of the requested attributes cannot be
+        // group-close to them.
+ final StringJoiner sj = new StringJoiner(" OR ");
+ final ArrayList<Object> args = new ArrayList<>();
+ args.add(System.currentTimeMillis());
+ for (final String field : values.keySet()) {
+ sj.add(field + " = ?");
+ args.add(values.get(field));
+ }
+
+ final String selection = NetworkAttributesContract.COLNAME_EXPIRYDATE + " > ? AND ("
+ + sj.toString() + ")";
+ final Cursor cursor = db.queryWithFactory(new CustomCursorFactory(args),
+ false, // distinct
+ NetworkAttributesContract.TABLENAME,
+ null, // columns, null means everything
+ selection, // selection
+ null, // selectionArgs, horrendously passed to the cursor factory instead
+ null, // groupBy
+ null, // having
+ null, // orderBy
+ null); // limit
+        if (cursor.getCount() <= 0) {
+            cursor.close();
+            return null;
+        }
+ cursor.moveToFirst();
+ String bestKey = null;
+ float bestMatchConfidence = GROUPCLOSE_CONFIDENCE; // Never return a match worse than this.
+        while (!cursor.isAfterLast()) {
+            final NetworkAttributes read = readNetworkAttributesLine(cursor);
+            // The selection only returns non-expired lines, but a line may have expired between
+            // the query and this read; skip it in that unlikely case.
+            if (null == read) {
+                cursor.moveToNext();
+                continue;
+            }
+            final float confidence = read.getNetworkGroupSamenessConfidence(attr);
+ if (confidence > bestMatchConfidence) {
+ bestKey = getString(cursor, NetworkAttributesContract.COLNAME_L2KEY);
+ bestMatchConfidence = confidence;
+ }
+ cursor.moveToNext();
+ }
+ cursor.close();
+ return bestKey;
+ }
+
+ // Helper methods
+ private static String getString(final Cursor cursor, final String columnName) {
+ final int columnIndex = cursor.getColumnIndex(columnName);
+ return (columnIndex >= 0) ? cursor.getString(columnIndex) : null;
+ }
+ private static byte[] getBlob(final Cursor cursor, final String columnName) {
+ final int columnIndex = cursor.getColumnIndex(columnName);
+ return (columnIndex >= 0) ? cursor.getBlob(columnIndex) : null;
+ }
+ private static int getInt(final Cursor cursor, final String columnName,
+ final int defaultValue) {
+ final int columnIndex = cursor.getColumnIndex(columnName);
+ return (columnIndex >= 0) ? cursor.getInt(columnIndex) : defaultValue;
+ }
+ private static long getLong(final Cursor cursor, final String columnName,
+ final long defaultValue) {
+ final int columnIndex = cursor.getColumnIndex(columnName);
+ return (columnIndex >= 0) ? cursor.getLong(columnIndex) : defaultValue;
+ }
+}
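
The CustomCursorFactory comment above explains why blob arguments cannot go through the usual selectionArgs. As an illustration only (not part of the change, and assuming hypothetical locals db, clientId and someBlob inside this class), a blob column could be matched with the same queryWithFactory() trick that findClosestAttributes() uses:

    // Arguments are bound by CustomCursorFactory with bindString/bindBlob, so the byte[]
    // is never coerced into (and sanitized as) a String.
    final ArrayList<Object> args = new ArrayList<>();
    args.add(clientId); // String, bound with bindString
    args.add(someBlob); // byte[], bound with bindBlob
    final Cursor cursor = db.queryWithFactory(new CustomCursorFactory(args),
            false, // distinct
            PrivateDataContract.TABLENAME,
            null, // columns, null means everything
            PrivateDataContract.COLNAME_CLIENT + " = ? AND "
                    + PrivateDataContract.COLNAME_DATA + " = ?", // selection
            null, // selectionArgs, bound by the factory instead
            null, // groupBy
            null, // having
            null, // orderBy
            null); // limit
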
diff --git a/src/com/android/server/connectivity/ipmemorystore/IpMemoryStoreService.java b/src/com/android/server/connectivity/ipmemorystore/IpMemoryStoreService.java
new file mode 100644
index 0000000..f801b35
--- /dev/null
+++ b/src/com/android/server/connectivity/ipmemorystore/IpMemoryStoreService.java
@@ -0,0 +1,397 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.server.connectivity.ipmemorystore;
+
+import static android.net.ipmemorystore.Status.ERROR_DATABASE_CANNOT_BE_OPENED;
+import static android.net.ipmemorystore.Status.ERROR_GENERIC;
+import static android.net.ipmemorystore.Status.ERROR_ILLEGAL_ARGUMENT;
+import static android.net.ipmemorystore.Status.SUCCESS;
+
+import static com.android.server.connectivity.ipmemorystore.IpMemoryStoreDatabase.EXPIRY_ERROR;
+
+import android.annotation.NonNull;
+import android.annotation.Nullable;
+import android.content.Context;
+import android.database.SQLException;
+import android.database.sqlite.SQLiteDatabase;
+import android.net.IIpMemoryStore;
+import android.net.ipmemorystore.Blob;
+import android.net.ipmemorystore.IOnBlobRetrievedListener;
+import android.net.ipmemorystore.IOnL2KeyResponseListener;
+import android.net.ipmemorystore.IOnNetworkAttributesRetrieved;
+import android.net.ipmemorystore.IOnSameNetworkResponseListener;
+import android.net.ipmemorystore.IOnStatusListener;
+import android.net.ipmemorystore.NetworkAttributes;
+import android.net.ipmemorystore.NetworkAttributesParcelable;
+import android.net.ipmemorystore.SameL3NetworkResponse;
+import android.net.ipmemorystore.Status;
+import android.net.ipmemorystore.StatusParcelable;
+import android.os.RemoteException;
+import android.util.Log;
+
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+
+/**
+ * Implementation for the IP memory store.
+ * This component offers specialized services for network components to store and retrieve
+ * knowledge about networks, and provides intelligence that groups level 2 networks together
+ * into level 3 networks.
+ *
+ * @hide
+ */
+public class IpMemoryStoreService extends IIpMemoryStore.Stub {
+ private static final String TAG = IpMemoryStoreService.class.getSimpleName();
+ private static final int MAX_CONCURRENT_THREADS = 4;
+ private static final boolean DBG = true;
+
+ @NonNull
+ final Context mContext;
+ @Nullable
+ final SQLiteDatabase mDb;
+ @NonNull
+ final ExecutorService mExecutor;
+
+ /**
+ * Construct an IpMemoryStoreService object.
+ * This constructor will block on disk access to open the database.
+ * @param context the context to access storage with.
+ */
+ public IpMemoryStoreService(@NonNull final Context context) {
+        // Note that constructing the service will access the disk and block for some time,
+        // but this should make no difference to the clients: because the interface is one-way,
+        // clients fire and forget requests, the callbacks will get called eventually in any
+        // case, and the framework will wait for the service to be created before delivering
+        // subsequent requests.
+        // Avoiding this would mean the mDb member can't be final, which means the service would
+        // have to test for nullity, handle failure, and allow for a wait at every single access,
+        // which would make the code a lot more complex and require all methods to possibly block.
+ mContext = context;
+ SQLiteDatabase db;
+ final IpMemoryStoreDatabase.DbHelper helper = new IpMemoryStoreDatabase.DbHelper(context);
+ try {
+ db = helper.getWritableDatabase();
+            if (null == db) Log.e(TAG, "Unexpected null return of getWritableDatabase");
+ } catch (final SQLException e) {
+ Log.e(TAG, "Can't open the Ip Memory Store database", e);
+ db = null;
+ } catch (final Exception e) {
+ Log.wtf(TAG, "Impossible exception Ip Memory Store database", e);
+ db = null;
+ }
+ mDb = db;
+        // The work-stealing thread pool executor will spawn threads as needed, up to
+        // the max, only when there is no free thread available. This generally behaves
+        // exactly as one would intuitively expect:
+        // - When work arrives, it will spawn a new thread iff there are no available threads
+        // - When there is no work to do, it will shut down threads after a while (the while
+        //   being equal to 2 seconds (not configurable) when the max number of threads are
+        //   spun up, and twice as much for every one fewer thread)
+        // - When all threads are busy, the work is enqueued and waits for any worker
+        //   to become available.
+        // Because the stealing pool is made for very heavily parallel execution of
+        // small tasks that spawn others, it creates a queue per thread, which in this
+        // case is overhead. However, the three behaviors above make it a superior
+        // choice to a cached or fixed thread pool executor, neither of which can actually
+        // enqueue a task to wait for a thread to be free. This could probably be solved
+        // with judicious subclassing of ThreadPoolExecutor, but that's a lot of dangerous
+        // complexity for little benefit in this case.
+ mExecutor = Executors.newWorkStealingPool(MAX_CONCURRENT_THREADS);
+ }
+
+ /**
+     * Shut down the memory store service, cancelling running tasks and dropping queued tasks.
+ *
+ * This is provided to give a way to clean up, and is meant to be available in case of an
+ * emergency shutdown.
+ */
+ public void shutdown() {
+ // By contrast with ExecutorService#shutdown, ExecutorService#shutdownNow tries
+ // to cancel the existing tasks, and does not wait for completion. It does not
+ // guarantee the threads can be terminated in any given amount of time.
+ mExecutor.shutdownNow();
+ if (mDb != null) mDb.close();
+ }
+
+ /** Helper function to make a status object */
+ private StatusParcelable makeStatus(final int code) {
+ return new Status(code).toParcelable();
+ }
+
+ /**
+ * Store network attributes for a given L2 key.
+ *
+ * @param l2Key The L2 key for the L2 network. Clients that don't know or care about the L2
+ * key and only care about grouping can pass a unique ID here like the ones
+ * generated by {@code java.util.UUID.randomUUID()}, but keep in mind the low
+ * relevance of such a network will lead to it being evicted soon if it's not
+ * refreshed. Use findL2Key to try and find a similar L2Key to these attributes.
+ * @param attributes The attributes for this network.
+ * @param listener A listener to inform of the completion of this call, or null if the client
+ * is not interested in learning about success/failure.
+ * Through the listener, returns the L2 key. This is useful if the L2 key was not specified.
+ * If the call failed, the L2 key will be null.
+ */
+ // Note that while l2Key and attributes are non-null in spirit, they are received from
+ // another process. If the remote process decides to ignore everything and send null, this
+ // process should still not crash.
+ @Override
+ public void storeNetworkAttributes(@Nullable final String l2Key,
+ @Nullable final NetworkAttributesParcelable attributes,
+ @Nullable final IOnStatusListener listener) {
+        // Because the parcelable is 100% mutable, the executor thread may not see its members
+        // initialized. Therefore either an immutable object is created on this same thread
+        // before it's passed to the executor, or there needs to be a write barrier here and a
+        // read barrier in the executor thread.
+ final NetworkAttributes na = null == attributes ? null : new NetworkAttributes(attributes);
+ mExecutor.execute(() -> {
+ try {
+ final int code = storeNetworkAttributesAndBlobSync(l2Key, na,
+ null /* clientId */, null /* name */, null /* data */);
+ if (null != listener) listener.onComplete(makeStatus(code));
+ } catch (final RemoteException e) {
+ // Client at the other end died
+ }
+ });
+ }
+
+ /**
+ * Store a binary blob associated with an L2 key and a name.
+ *
+ * @param l2Key The L2 key for this network.
+ * @param clientId The ID of the client.
+ * @param name The name of this data.
+ * @param blob The data to store.
+ * @param listener The listener that will be invoked to return the answer, or null if the
+     *                 client is not interested in learning about success/failure.
+ * Through the listener, returns a status to indicate success or failure.
+ */
+ @Override
+ public void storeBlob(@Nullable final String l2Key, @Nullable final String clientId,
+ @Nullable final String name, @Nullable final Blob blob,
+ @Nullable final IOnStatusListener listener) {
+ final byte[] data = null == blob ? null : blob.data;
+ mExecutor.execute(() -> {
+ try {
+ final int code = storeNetworkAttributesAndBlobSync(l2Key,
+ null /* NetworkAttributes */, clientId, name, data);
+ if (null != listener) listener.onComplete(makeStatus(code));
+ } catch (final RemoteException e) {
+ // Client at the other end died
+ }
+ });
+ }
+
+ /**
+ * Helper method for storeNetworkAttributes and storeBlob.
+ *
+     * Either attributes must be non-null, or clientId, name and data must all be non-null
+     * (both may be provided). This will write the passed data if non-null, and will write the
+     * attributes if non-null, but in any case it will bump the relevance up.
+     * Returns a code from Status.{SUCCESS, ERROR_*}.
+ */
+ private int storeNetworkAttributesAndBlobSync(@Nullable final String l2Key,
+ @Nullable final NetworkAttributes attributes,
+ @Nullable final String clientId,
+ @Nullable final String name, @Nullable final byte[] data) {
+ if (null == l2Key) return ERROR_ILLEGAL_ARGUMENT;
+ if (null == attributes && null == data) return ERROR_ILLEGAL_ARGUMENT;
+ if (null != data && (null == clientId || null == name)) return ERROR_ILLEGAL_ARGUMENT;
+ if (null == mDb) return ERROR_DATABASE_CANNOT_BE_OPENED;
+ try {
+ final long oldExpiry = IpMemoryStoreDatabase.getExpiry(mDb, l2Key);
+ final long newExpiry = RelevanceUtils.bumpExpiryDate(
+ oldExpiry == EXPIRY_ERROR ? System.currentTimeMillis() : oldExpiry);
+ final int errorCode =
+ IpMemoryStoreDatabase.storeNetworkAttributes(mDb, l2Key, newExpiry, attributes);
+            // If there is no blob to store, the client is interested in the result of storing
+            // the attributes.
+ if (null == data) return errorCode;
+ // Otherwise it's interested in the result of storing the blob
+ return IpMemoryStoreDatabase.storeBlob(mDb, l2Key, clientId, name, data);
+ } catch (Exception e) {
+ if (DBG) {
+ Log.e(TAG, "Exception while storing for key {" + l2Key
+ + "} ; NetworkAttributes {" + (null == attributes ? "null" : attributes)
+ + "} ; clientId {" + (null == clientId ? "null" : clientId)
+ + "} ; name {" + (null == name ? "null" : name)
+ + "} ; data {" + Utils.byteArrayToString(data) + "}", e);
+ }
+ }
+ return ERROR_GENERIC;
+ }
+
+ /**
+ * Returns the best L2 key associated with the attributes.
+ *
+ * This will find a record that would be in the same group as the passed attributes. This is
+ * useful to choose the key for storing a sample or private data when the L2 key is not known.
+ * If multiple records are group-close to these attributes, the closest match is returned.
+ * If multiple records have the same closeness, the one with the smaller (unicode codepoint
+ * order) L2 key is returned.
+ * If no record matches these attributes, null is returned.
+ *
+ * @param attributes The attributes of the network to find.
+ * @param listener The listener that will be invoked to return the answer.
+ * Through the listener, returns the L2 key if one matched, or null.
+ */
+ @Override
+ public void findL2Key(@Nullable final NetworkAttributesParcelable attributes,
+ @Nullable final IOnL2KeyResponseListener listener) {
+ if (null == listener) return;
+ mExecutor.execute(() -> {
+ try {
+ if (null == attributes) {
+ listener.onL2KeyResponse(makeStatus(ERROR_ILLEGAL_ARGUMENT), null);
+ return;
+ }
+                if (null == mDb) {
+                    listener.onL2KeyResponse(makeStatus(ERROR_DATABASE_CANNOT_BE_OPENED), null);
+ return;
+ }
+ final String key = IpMemoryStoreDatabase.findClosestAttributes(mDb,
+ new NetworkAttributes(attributes));
+ listener.onL2KeyResponse(makeStatus(SUCCESS), key);
+ } catch (final RemoteException e) {
+ // Client at the other end died
+ }
+ });
+ }
+
+ /**
+ * Returns whether, to the best of the store's ability to tell, the two specified L2 keys point
+ * to the same L3 network. Group-closeness is used to determine this.
+ *
+ * @param l2Key1 The key for the first network.
+ * @param l2Key2 The key for the second network.
+ * @param listener The listener that will be invoked to return the answer.
+ * Through the listener, a SameL3NetworkResponse containing the answer and confidence.
+ */
+ @Override
+ public void isSameNetwork(@Nullable final String l2Key1, @Nullable final String l2Key2,
+ @Nullable final IOnSameNetworkResponseListener listener) {
+ if (null == listener) return;
+ mExecutor.execute(() -> {
+ try {
+ if (null == l2Key1 || null == l2Key2) {
+ listener.onSameNetworkResponse(makeStatus(ERROR_ILLEGAL_ARGUMENT), null);
+ return;
+ }
+                if (null == mDb) {
+                    listener.onSameNetworkResponse(
+                            makeStatus(ERROR_DATABASE_CANNOT_BE_OPENED), null);
+ return;
+ }
+ try {
+ final NetworkAttributes attr1 =
+ IpMemoryStoreDatabase.retrieveNetworkAttributes(mDb, l2Key1);
+ final NetworkAttributes attr2 =
+ IpMemoryStoreDatabase.retrieveNetworkAttributes(mDb, l2Key2);
+ if (null == attr1 || null == attr2) {
+ listener.onSameNetworkResponse(makeStatus(SUCCESS),
+ new SameL3NetworkResponse(l2Key1, l2Key2,
+ -1f /* never connected */).toParcelable());
+ return;
+ }
+ final float confidence = attr1.getNetworkGroupSamenessConfidence(attr2);
+ listener.onSameNetworkResponse(makeStatus(SUCCESS),
+ new SameL3NetworkResponse(l2Key1, l2Key2, confidence).toParcelable());
+ } catch (Exception e) {
+ listener.onSameNetworkResponse(makeStatus(ERROR_GENERIC), null);
+ }
+ } catch (final RemoteException e) {
+ // Client at the other end died
+ }
+ });
+ }
+
+ /**
+ * Retrieve the network attributes for a key.
+ * If no record is present for this key, this will return null attributes.
+ *
+ * @param l2Key The key of the network to query.
+ * @param listener The listener that will be invoked to return the answer.
+ * Through the listener, returns the network attributes and the L2 key associated with
+ * the query.
+ */
+ @Override
+ public void retrieveNetworkAttributes(@Nullable final String l2Key,
+ @Nullable final IOnNetworkAttributesRetrieved listener) {
+ if (null == listener) return;
+ mExecutor.execute(() -> {
+ try {
+ if (null == l2Key) {
+ listener.onNetworkAttributesRetrieved(
+ makeStatus(ERROR_ILLEGAL_ARGUMENT), l2Key, null);
+ return;
+ }
+ if (null == mDb) {
+ listener.onNetworkAttributesRetrieved(
+ makeStatus(ERROR_DATABASE_CANNOT_BE_OPENED), l2Key, null);
+ return;
+ }
+ try {
+ final NetworkAttributes attributes =
+ IpMemoryStoreDatabase.retrieveNetworkAttributes(mDb, l2Key);
+ listener.onNetworkAttributesRetrieved(makeStatus(SUCCESS), l2Key,
+ null == attributes ? null : attributes.toParcelable());
+ } catch (final Exception e) {
+ listener.onNetworkAttributesRetrieved(makeStatus(ERROR_GENERIC), l2Key, null);
+ }
+ } catch (final RemoteException e) {
+ // Client at the other end died
+ }
+ });
+ }
+
+ /**
+ * Retrieve previously stored private data.
+ * If no data was stored for this L2 key and name this will return null.
+ *
+ * @param l2Key The L2 key.
+ * @param clientId The id of the client that stored this data.
+ * @param name The name of the data.
+ * @param listener The listener that will be invoked to return the answer.
+ * Through the listener, returns the private data if any or null if none, with the L2 key
+ * and the name of the data associated with the query.
+ */
+ @Override
+ public void retrieveBlob(@NonNull final String l2Key, @NonNull final String clientId,
+ @NonNull final String name, @NonNull final IOnBlobRetrievedListener listener) {
+ if (null == listener) return;
+ mExecutor.execute(() -> {
+ try {
+ if (null == l2Key) {
+ listener.onBlobRetrieved(makeStatus(ERROR_ILLEGAL_ARGUMENT), l2Key, name, null);
+ return;
+ }
+ if (null == mDb) {
+ listener.onBlobRetrieved(makeStatus(ERROR_DATABASE_CANNOT_BE_OPENED), l2Key,
+ name, null);
+ return;
+ }
+ try {
+ final Blob b = new Blob();
+ b.data = IpMemoryStoreDatabase.retrieveBlob(mDb, l2Key, clientId, name);
+ listener.onBlobRetrieved(makeStatus(SUCCESS), l2Key, name, b);
+ } catch (final Exception e) {
+ listener.onBlobRetrieved(makeStatus(ERROR_GENERIC), l2Key, name, null);
+ }
+ } catch (final RemoteException e) {
+ // Client at the other end died
+ }
+ });
+ }
+}
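
Every entry point of IpMemoryStoreService is asynchronous and reports back through the AIDL listeners used above. A hedged sketch of a caller that holds the IIpMemoryStore binder (ipMemoryStore and attributesParcelable are placeholder variables, and RemoteException handling is elided):

    // Store, fire and forget: a null listener means the caller does not care about the status.
    ipMemoryStore.storeNetworkAttributes("someL2Key", attributesParcelable, null /* listener */);

    // Retrieve: the answer is delivered on one of the service's executor threads.
    ipMemoryStore.retrieveNetworkAttributes("someL2Key", new IOnNetworkAttributesRetrieved.Stub() {
        @Override
        public void onNetworkAttributesRetrieved(final StatusParcelable status, final String l2Key,
                final NetworkAttributesParcelable attributes) {
            // attributes is null on error or when nothing was stored for this key.
        }
    });
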
diff --git a/src/com/android/server/connectivity/ipmemorystore/RelevanceUtils.java b/src/com/android/server/connectivity/ipmemorystore/RelevanceUtils.java
new file mode 100644
index 0000000..38d5544
--- /dev/null
+++ b/src/com/android/server/connectivity/ipmemorystore/RelevanceUtils.java
@@ -0,0 +1,307 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.server.connectivity.ipmemorystore;
+
+import com.android.internal.annotations.VisibleForTesting;
+
+/**
+ * A class containing the logic around the relevance value for
+ * IP Memory Store.
+ *
+ * @hide
+ */
+public class RelevanceUtils {
+ /**
+ * The relevance is a decaying value that gets lower and lower until it
+ * reaches 0 after some time passes. It follows an exponential decay law,
+ * dropping slowly at first then faster and faster, because a network is
+ * likely to be visited again if it was visited not long ago, and the longer
+ * it hasn't been visited the more likely it is that it won't be visited
+ * again. For example, a network visited on holiday should stay fresh for
+ * the duration of the holiday and persist for a while, but after the venue
+ * hasn't been visited for a while it should quickly be discarded. What
+ * should accelerate forgetting the network is extended periods without
+ * visits, so that occasional venues get discarded but regular visits keep
+ * the network relevant, even if the visits are infrequent.
+ *
+ * This function must be stable by iteration, meaning that adjusting the same value
+ * for different dates iteratively multiple times should give the same result.
+ * Formally, if f is the decay function that associates a relevance x at a date d1
+ * to the value at ulterior date d3, then for any date d2 between d1 and d3 :
+ * f(x, d3 - d1) = f(f(x, d3 - d2), d2 - d1). Intuitively, this property simply
+ * means it should be the same to compute and store back the value after two months,
+ * or to do it once after one month, store it back, and do it again after another
+     * month has passed.
+ * The pair of the relevance and date define the entire curve, so any pair
+ * of values on the curve will define the same curve. Setting one of them to a
+ * constant, so as not to have to store it, means the other one will always suffice
+ * to describe the curve. For example, only storing the date for a known, constant
+ * value of the relevance is an efficient way of remembering this information (and
+ * to compare relevances together, as f is monotonically decreasing).
+ *
+ *** Choosing the function :
+ * Functions of the kind described above are standard exponential decay functions
+ * like the ones that govern atomic decay where the value at any given date can be
+ * computed uniformly from the value at a previous date and the time elapsed since
+ * that date. It is simple to picture this kind of function as one where after a
+ * given period of time called the half-life, the relevance value will have been
+ * halved. Decay of this kind is expressed in function of the previous value by
+ * functions like
+ * f(x, t) = x * F ^ (t / L)
+ * ...where x is the value, t is the elapsed time, L is the half-life (or more
+ * generally the F-th-life) and F the decay factor (typically 0.5, hence why L is
+ * usually called the half-life). The ^ symbol here is used for exponentiation.
+ * Or, starting at a given M for t = 0 :
+ * f(t) = M * F ^ (t / L)
+ *
+     * Because a line in the store needs to become irrelevant at some point but
+     * this class of functions never goes to 0, a minimum cutoff has to be chosen to
+     * represent irrelevance. The simplest way of doing this is to add this
+     * minimum cutoff to the value before the decay computation and remove it after.
+     * Thus the function becomes:
+ * f(x, t) = ((x + K) * F ^ (t / L)) - K
+ * ...where K is the minimum cutoff, L the half-life, and F the factor between
+ * the original x and x after its half-life. Strictly speaking using the word
+ * "half-life" implies that F = 0.5, but the relation works for any value of F.
+ *
+ * It is easy enough to check that this function satisfies the stability
+ * relation that was given above for any value of F, L and K, which become
+ * parameters that can be defined at will.
+ *
+ * relevance
+ * 1.0 |
+ * |\
+ * | \
+ * | \ (this graph rendered with L = 75 days and K = 1/40)
+ * 0.75| ',
+ * | \
+ * | '.
+ * | \.
+ * | \
+ * 0.5 | '\
+ * | ''.
+ * | ''.
+ * | ''.
+ * 0.25| '''..
+ * | '''..
+ * | ''''....
+ * | '''''..........
+ * 0 +-------------------------------------------------------''''''''''----
+ * 0 50 100 150 200 250 300 350 400 days
+ *
+ *** Choosing the parameters
+ * The maximum M is an arbitrary parameter that simply scales the curve.
+ * The tradeoff for M is pretty simple : if the relevance is going to be an
+ * integer, the bigger M is the more precision there is in the relevance.
+ * However, values of M that are easy for humans to read are preferable to
+ * help debugging, and a suitably low value may be enough to ensure there
+ * won't be integer overflows in intermediate computations.
+ * A value of 1_000_000 probably is plenty for precision, while still in the
+ * low range of what ints can represent.
+ *
+ * F and L are parameters to be chosen arbitrarily and have an impact on how
+ * fast the relevance will be decaying at first, keeping in mind that
+ * the 400 days value and the cap stay the same. In simpler words, F and L
+ * define the steepness of the curve.
+ * To keep things simple (and familiar) F is arbitrarily chosen to be 0.5, and
+ * L is set to 200 days visually to achieve the desired effect. Refer to the
+ * illustration above to get a feel of how that feels.
+ *
+ * Moreover, the memory store works on an assumption that the relevance should
+ * be capped, and that an entry with capped relevance should decay in 400 days.
+ * This is on premises that the networks a device will need to remember the
+ * longest should be networks visited about once a year.
+ * For this reason, the relevance is at the maximum M 400 days before expiry :
+ * f(M, 400 days) = 0
+     * Replacing f with its definition above, K can then be derived
+     * from the values of M, F and L:
+ * (M + K) * F ^ (t / L) - K = 0
+ * K = M * F ^ (400 days / L) / (1 - F ^ (400 days / L))
+ * Replacing with actual values this gives :
+ * K = 1_000_000 * 0.5 ^ (400 / 200) / (1 - 0.5 ^ (400 / 200))
+ * = 1_000_000 / 3 ≈ 333_333.3
+ * This ensures the function has the desired profile, the desired value at
+ * cap, and the desired value at expiry.
+ *
+ *** Useful relations
+ * Let's define the expiry time for any given relevance x as the interval of
+ * time such as :
+ * f(x, expiry) = 0
+ * which can be rewritten
+ * ((x + K) * F ^ (expiry / L)) = K
+ * ...giving an expression of the expiry in function of the relevance x as
+ * expiry = L * logF(K / (x + K))
+ * Conversely the relevance x can be expressed in function of the expiry as
+ * x = K / F ^ (expiry / L) - K
+ * These relations are useful in utility functions.
+ *
+ *** Bumping things up
+ * The last issue therefore is to decide how to bump up the relevance. The
+ * simple approach is to simply lift up the curve a little bit by a constant
+ * normalized amount, delaying the time of expiry. For example increasing
+ * the relevance by an amount I gives :
+ * x2 = x1 + I
+ * x2 and x1 correspond to two different expiry times expiry2 and expiry1,
+ * and replacing x1 and x2 in the relation above with their expression in
+ * function of the expiry comes :
+ * K / F ^ (expiry2 / L) - K = K / F ^ (expiry1 / L) - K + I
+ * which resolves to :
+ * expiry2 = L * logF(K / (I + K / F ^ (expiry1 / L)))
+ *
+ * In this implementation, the bump is defined as 1/25th of the cap for
+ * the relevance. This means a network will be remembered for the maximum
+ * period of 400 days if connected 25 times in succession not accounting
+ * for decay. Of course decay actually happens so it will take more than 25
+ * connections for any given network to actually reach the cap, but because
+     * decay is slow at first, it is a good estimate of how quickly the cap is reached.
+ *
+ * Specifically, it gives the following four results :
+ * - A network that a device connects to once hits irrelevance about 32.7 days after
+ * it was first registered if never connected again.
+ * - A network that a device connects to once a day at a fixed hour will hit the cap
+ * on the 27th connection.
+ * - A network that a device connects to once a week at a fixed hour will hit the cap
+ * on the 57th connection.
+ * - A network that a device connects to every day for 7 straight days then never again
+ * expires 144 days after the last connection.
+     * These metrics tend to match the requirements pretty well.
+ */
+
+ // TODO : make these constants configurable at runtime. Don't forget to build it so that
+ // changes will wipe the database, migrate the values, or otherwise make sure the relevance
+ // values are still meaningful.
+
+    // How long, in milliseconds, a capped relevance is valid for, or in other
+    // words how many milliseconds after its relevance was set to CAPPED_RELEVANCE does
+    // any given line expire. 400 days.
+ @VisibleForTesting
+ public static final long CAPPED_RELEVANCE_LIFETIME_MS = 400L * 24 * 60 * 60 * 1000;
+
+ // The constant that represents a normalized 1.0 value for the relevance. In other words,
+ // the cap for the relevance. This is referred to as M in the explanation above.
+ @VisibleForTesting
+ public static final int CAPPED_RELEVANCE = 1_000_000;
+
+ // The decay factor. After a half-life, the relevance will have decayed by this value.
+ // This is referred to as F in the explanation above.
+ private static final double DECAY_FACTOR = 0.5;
+
+ // The half-life. After this time, the relevance will have decayed by a factor DECAY_FACTOR.
+ // This is referred to as L in the explanation above.
+ private static final long HALF_LIFE_MS = 200L * 24 * 60 * 60 * 1000;
+
+    // The constant offset that shifts the decay curve so that it reaches 0 at expiry
+    // (the irrelevance floor). This is referred to as K in the explanation above.
+ private static final double IRRELEVANCE_FLOOR =
+ CAPPED_RELEVANCE * powF((double) CAPPED_RELEVANCE_LIFETIME_MS / HALF_LIFE_MS)
+ / (1 - powF((double) CAPPED_RELEVANCE_LIFETIME_MS / HALF_LIFE_MS));
+
+ // How much to bump the relevance by every time a line is written to.
+ @VisibleForTesting
+ public static final int RELEVANCE_BUMP = CAPPED_RELEVANCE / 25;
+
+ // Java doesn't include a function for the logarithm in an arbitrary base, so implement it
+ private static final double LOG_DECAY_FACTOR = Math.log(DECAY_FACTOR);
+ private static double logF(final double value) {
+ return Math.log(value) / LOG_DECAY_FACTOR;
+ }
+
+ // Utility function to get a power of the decay factor, to simplify the code.
+ private static double powF(final double value) {
+ return Math.pow(DECAY_FACTOR, value);
+ }
+
+ /**
+ * Compute the value of the relevance now given an expiry date.
+ *
+     * @param expiry the date at which the line in the database expires.
+ * @return the adjusted value of the relevance for this moment in time.
+ */
+ public static int computeRelevanceForNow(final long expiry) {
+ return computeRelevanceForTargetDate(expiry, System.currentTimeMillis());
+ }
+
+ /**
+ * Compute the value of the relevance at a given date from an expiry date.
+ *
+     * Because relevance decays with time, the relevance at a date in the past corresponds
+     * to a different, lower relevance at a later date.
+ *
+ * Relevance is always a positive value. 0 means not relevant at all.
+ *
+ * See the explanation at the top of this file to get the justification for this
+ * computation.
+ *
+     * @param expiry the date at which the line in the database expires.
+ * @param target the target date to adjust the relevance to.
+ * @return the adjusted value of the relevance for the target moment.
+ */
+ public static int computeRelevanceForTargetDate(final long expiry, final long target) {
+ final long delay = expiry - target;
+ if (delay >= CAPPED_RELEVANCE_LIFETIME_MS) return CAPPED_RELEVANCE;
+ if (delay <= 0) return 0;
+ return (int) (IRRELEVANCE_FLOOR / powF((float) delay / HALF_LIFE_MS) - IRRELEVANCE_FLOOR);
+ }
+
+ /**
+ * Compute the expiry duration adjusted up for a new fresh write.
+ *
+ * Every time data is written to the memory store for a given line, the
+ * relevance is bumped up by a certain amount, which will boost the priority
+ * of this line for computation of group attributes, and delay (possibly
+ * indefinitely, if the line is accessed regularly) forgetting the data stored
+ * in that line.
+ * As opposed to bumpExpiryDate, this function uses a duration from now to expiry.
+ *
+ * See the explanation at the top of this file for a justification of this computation.
+ *
+ * @param oldExpiryDuration the old expiry duration in milliseconds from now.
+ * @return the expiry duration representing a bumped up relevance value.
+ */
+ public static long bumpExpiryDuration(final long oldExpiryDuration) {
+ // L * logF(K / (I + K / F ^ (expiry1 / L))), as documented above
+ final double divisionFactor = powF(((double) oldExpiryDuration) / HALF_LIFE_MS);
+ final double oldRelevance = IRRELEVANCE_FLOOR / divisionFactor;
+ final long newDuration =
+ (long) (HALF_LIFE_MS * logF(IRRELEVANCE_FLOOR / (RELEVANCE_BUMP + oldRelevance)));
+ return Math.min(newDuration, CAPPED_RELEVANCE_LIFETIME_MS);
+ }
+
+ /**
+ * Compute the new expiry date adjusted up for a new fresh write.
+ *
+ * Every time data is written to the memory store for a given line, the
+ * relevance is bumped up by a certain amount, which will boost the priority
+ * of this line for computation of group attributes, and delay (possibly
+ * indefinitely, if the line is accessed regularly) forgetting the data stored
+ * in that line.
+ * As opposed to bumpExpiryDuration, this function takes the old timestamp and returns the
+ * new timestamp.
+ *
+     * See {@link #bumpExpiryDuration(long)}, and keep in mind that the bump depends on when this
+     * is called: because the relevance decays exponentially, bumping up a high relevance (for a
+     * date far in the future) is less potent than bumping up a low relevance (for a date in
+     * the near future).
+ *
+ * @param oldExpiryDate the old date of expiration.
+ * @return the new expiration date after the relevance bump.
+ */
+ public static long bumpExpiryDate(final long oldExpiryDate) {
+ final long now = System.currentTimeMillis();
+ final long newDuration = bumpExpiryDuration(oldExpiryDate - now);
+ return now + newDuration;
+ }
+}
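
Because the constants and helpers above are public statics, the figures quoted in the long comment can be checked directly. A small illustrative sketch (the day constant is local to the example):

    final long day = 24L * 60 * 60 * 1000;
    // A single write to a brand-new line buys roughly 32.7 days before irrelevance.
    final long firstLife = RelevanceUtils.bumpExpiryDuration(0); // ~32.7 * day
    // Bumping a line already at the cap cannot push it past the 400-day lifetime.
    final long cappedLife = RelevanceUtils.bumpExpiryDuration(400L * day);
    // cappedLife == RelevanceUtils.CAPPED_RELEVANCE_LIFETIME_MS
    // Relevance is CAPPED_RELEVANCE right at the cap, and 0 once the expiry date is reached.
    final int atCap = RelevanceUtils.computeRelevanceForTargetDate(400L * day, 0); // == 1_000_000
    final int expired = RelevanceUtils.computeRelevanceForTargetDate(0L, 0); // == 0
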
diff --git a/src/com/android/server/connectivity/ipmemorystore/Utils.java b/src/com/android/server/connectivity/ipmemorystore/Utils.java
new file mode 100644
index 0000000..9cbf490
--- /dev/null
+++ b/src/com/android/server/connectivity/ipmemorystore/Utils.java
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.server.connectivity.ipmemorystore;
+
+import android.annotation.NonNull;
+import android.annotation.Nullable;
+import android.net.ipmemorystore.Blob;
+
+/** {@hide} */
+public class Utils {
+ /** Pretty print */
+ public static String blobToString(@Nullable final Blob blob) {
+ return "Blob : " + byteArrayToString(null == blob ? null : blob.data);
+ }
+
+ /** Pretty print */
+ public static String byteArrayToString(@Nullable final byte[] data) {
+ if (null == data) return "null";
+ final StringBuilder sb = new StringBuilder("[");
+ if (data.length <= 24) {
+ appendByteArray(sb, data, 0, data.length);
+ } else {
+ appendByteArray(sb, data, 0, 16);
+ sb.append("...");
+ appendByteArray(sb, data, data.length - 8, data.length);
+ }
+ sb.append("]");
+ return sb.toString();
+ }
+
+ // Adds the hex representation of the array between the specified indices (inclusive, exclusive)
+ private static void appendByteArray(@NonNull final StringBuilder sb, @NonNull final byte[] ar,
+ final int from, final int to) {
+ for (int i = from; i < to; ++i) {
+ sb.append(String.format("%02X", ar[i]));
+ }
+ }
+}
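
For reference, the pretty-printer above hex-encodes each byte and elides long arrays; a couple of illustrative calls with their expected output:

    Utils.byteArrayToString(null);                   // "null"
    Utils.byteArrayToString(new byte[] { 1, 2, 3 }); // "[010203]"
    // Arrays longer than 24 bytes print the first 16 bytes, then "...", then the last 8 bytes.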