Merge pull request #120 from TheBlueMatt/main
Update to 0.0.112 (with RGS)
TheBlueMatt authored Oct 28, 2022
2 parents 384d4ce + e0fec34 commit 0de4267
Showing 611 changed files with 26,028 additions and 9,269 deletions.
20 changes: 10 additions & 10 deletions .github/workflows/build.yml
@@ -39,11 +39,11 @@ jobs:
# Note this is a different endpoint, as we need one non-upstream commit!
git clone https://git.bitcoin.ninja/rust-lightning
cd rust-lightning
- git checkout origin/2022-09-111-java-bindings
+ git checkout origin/2022-10-112-java-bindings
cd ..
git clone https://github.com/lightningdevkit/ldk-c-bindings
cd ldk-c-bindings
- git checkout 0.0.111
+ git checkout 0.0.112
- name: Rebuild C bindings without STD
run: |
cd ldk-c-bindings
@@ -130,11 +130,11 @@ jobs:
# Note this is a different endpoint, as we need one non-upstream commit!
git clone https://git.bitcoin.ninja/rust-lightning
cd rust-lightning
- git checkout origin/2022-09-111-java-bindings
+ git checkout origin/2022-10-112-java-bindings
cd ..
git clone https://github.com/lightningdevkit/ldk-c-bindings
cd ldk-c-bindings
- git checkout 0.0.111
+ git checkout 0.0.112
- name: Rebuild C bindings, and check the sample app builds + links
run: |
cd ldk-c-bindings
@@ -185,11 +185,11 @@ jobs:
# Note this is a different endpoint, as we need one non-upstream commit!
git clone https://git.bitcoin.ninja/rust-lightning
cd rust-lightning
- git checkout origin/2022-09-111-java-bindings
+ git checkout origin/2022-10-112-java-bindings
cd ..
git clone https://github.com/lightningdevkit/ldk-c-bindings
cd ldk-c-bindings
- git checkout 0.0.111
+ git checkout 0.0.112
- name: Rebuild C bindings, and check the sample app builds + links
run: |
cd ldk-c-bindings
@@ -288,11 +288,11 @@ jobs:
# Note this is a different endpoint, as we need one non-upstream commit!
git clone https://git.bitcoin.ninja/rust-lightning
cd rust-lightning
- git checkout origin/2022-09-111-java-bindings
+ git checkout origin/2022-10-112-java-bindings
cd ..
git clone https://github.com/lightningdevkit/ldk-c-bindings
cd ldk-c-bindings
- git checkout 0.0.111
+ git checkout 0.0.112
- name: Checkout Android AAR binaries and artifacts
run: |
# Gitweb only allows snapshots of folders by providing the object hash, which we have to extract:
@@ -368,11 +368,11 @@ jobs:
# Note this is a different endpoint, as we need one non-upstream commit!
git clone https://git.bitcoin.ninja/rust-lightning
cd rust-lightning
- git checkout origin/2022-09-111-java-bindings
+ git checkout origin/2022-10-112-java-bindings
cd ..
git clone https://github.com/lightningdevkit/ldk-c-bindings
cd ldk-c-bindings
- git checkout 0.0.111
+ git checkout 0.0.112
- name: Rebuild C bindings with upstream clang, and check the sample app builds + links
run: |
export PATH=`pwd`/clang+llvm-14.0.5-x86_64-apple-darwin/bin:$PATH
22 changes: 17 additions & 5 deletions gen_type_mapping.py
@@ -190,10 +190,19 @@ def _do_map_type_with_info(self, ty_info, print_void, ret_arr_len, is_free, hold
if subty.to_hu_conv is not None:
to_hu_conv = self.consts.var_decl_statement(self.consts.c_type_map["uint32_t"][0], conv_name + "_len", self.consts.get_java_arr_len(arr_name)) + ";\n"
to_hu_conv += self.consts.var_decl_statement(ty_info.java_hu_ty, conv_name + "_arr", self.consts.constr_hu_array(ty_info, conv_name + "_len"))
to_hu_conv += ";\n" + self.consts.for_n_in_range(idxc, "0", conv_name + "_len") + "\n"
to_hu_conv += "\t" + self.consts.var_decl_statement(subty.java_ty, conv_name, self.consts.get_java_arr_elem(subty, arr_name, idxc)) + ";\n"
to_hu_conv += "\t" + subty.to_hu_conv.replace("\n", "\n\t") + "\n"
to_hu_conv += "\t" + conv_name + "_arr[" + idxc + "] = " + subty.to_hu_conv_name + ";\n}"
to_hu_conv += ";\n"
pfx = ""
if is_nullable:
to_hu_conv += "if (" + arr_name + " != null) {\n"
pfx = "\t"
to_hu_conv += pfx + self.consts.for_n_in_range(idxc, "0", conv_name + "_len") + "\n"

to_hu_conv += pfx + "\t" + self.consts.var_decl_statement(subty.java_ty, conv_name, self.consts.get_java_arr_elem(subty, arr_name, idxc)) + ";\n"
to_hu_conv += pfx + "\t" + subty.to_hu_conv.replace("\n", "\n\t" + pfx) + "\n"
to_hu_conv += pfx + "\t" + conv_name + "_arr[" + idxc + "] = " + subty.to_hu_conv_name + ";\n"
to_hu_conv += pfx + "}"
if is_nullable:
to_hu_conv += "\n}"
cleanup = self.consts.cleanup_converted_native_array(ty_info, arr_name)
if cleanup is not None:
to_hu_conv += "\n" + cleanup
@@ -209,7 +218,10 @@ def _do_map_type_with_info(self, ty_info, print_void, ret_arr_len, is_free, hold
hu_conv_b = ""
if subty.from_hu_conv[1] != "":
iterator = self.consts.for_n_in_arr(conv_name, arr_name, subty)
- hu_conv_b = iterator[0] + subty.from_hu_conv[1] + ";" + iterator[1]
+ if is_nullable:
+     hu_conv_b = "if (" + arr_name + " != null) { " + iterator[0] + subty.from_hu_conv[1] + ";" + iterator[1] + " }"
+ else:
+     hu_conv_b = iterator[0] + subty.from_hu_conv[1] + ";" + iterator[1]
if from_hu_conv is not None:
arr_conv = self.consts.primitive_arr_from_hu(ty_info.subty, None, self.consts.map_hu_array_elems(arr_name, conv_name, ty_info, subty))
assert arr_conv[1] == ""
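For context, the gen_type_mapping.py change above makes the generated Java array conversions null-safe: the element-conversion loop is wrapped in an "if (arr != null)" guard when the argument is nullable. Below is a rough, self-contained sketch of the code shape the generator is now expected to emit; all names (Hu, toHuConv, arr) are hypothetical stand-ins, not actual generator output.

    // Sketch of the generated conversion shape for a nullable array argument.
    // The loop now sits inside a null check, so a Java null no longer NPEs.
    public class NullableArrayConvSketch {
        // Stand-in for a human-usable wrapper type produced by the bindings.
        static final class Hu {
            final long raw;
            Hu(long raw) { this.raw = raw; }
        }

        static Hu[] toHuConv(long[] arr) {
            Hu[] arr_conv_arr = null;
            if (arr != null) { // the new is_nullable guard
                int arr_conv_len = arr.length;
                arr_conv_arr = new Hu[arr_conv_len];
                for (int q = 0; q < arr_conv_len; q++) {
                    long arr_conv = arr[q];           // raw element from the C side
                    arr_conv_arr[q] = new Hu(arr_conv); // per-element to_hu_conv
                }
            }
            return arr_conv_arr;
        }

        public static void main(String[] args) {
            System.out.println(toHuConv(null));                    // null, no exception
            System.out.println(toHuConv(new long[]{1, 2}).length); // 2
        }
    }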
4 changes: 2 additions & 2 deletions java_strings.py
@@ -1013,8 +1013,8 @@ def native_c_map_trait(self, struct_name, field_vars, flattened_field_vars, fiel
if fn_line.ret_ty_info.c_ty.endswith("Array"):
out_c = out_c + "\t" + fn_line.ret_ty_info.c_ty + " ret = (*env)->CallObjectMethod(env, obj, j_calls->" + fn_line.fn_name + "_meth"
elif fn_line.ret_ty_info.c_ty == "void":
out_c += "\t(*env)->Call" + fn_line.ret_ty_info.java_ty.title() + "Method(env, obj, j_calls->" + fn_line.fn_name + "_meth"
elif fn_line.ret_ty_info.java_hu_ty == "String":
out_c += "\t(*env)->CallVoidMethod(env, obj, j_calls->" + fn_line.fn_name + "_meth"
elif fn_line.ret_ty_info.java_hu_ty == "String" or "org/ldk/enums" in fn_line.ret_ty_info.java_fn_ty_arg:
# Manually write out String methods as they're just an Object
out_c += "\t" + fn_line.ret_ty_info.c_ty + " ret = (*env)->CallObjectMethod(env, obj, j_calls->" + fn_line.fn_name + "_meth"
elif not fn_line.ret_ty_info.passed_as_ptr:
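One note on the java_strings.py change above: JNI only exposes type-specific Call*Method entry points for primitives and void, while a Java enum value is an ordinary object reference, so the generated C glue must fetch enum returns with CallObjectMethod, exactly as it already did for String. A hedged Java-side illustration follows; the interface and names are invented for the example, not the real generated trait.

    // Illustration: an enum return crosses the JNI boundary as an Object.
    public class EnumReturnSketch {
        enum Status { COMPLETED, IN_PROGRESS, PERMANENT_FAILURE }

        // A trait-style callback returning an enum; native code would
        // retrieve this return value with CallObjectMethod.
        interface Persister {
            Status persist(byte[] monitorBytes);
        }

        public static void main(String[] args) {
            Persister p = bytes -> Status.COMPLETED;
            Object ret = p.persist(new byte[0]); // what the C glue would see
            System.out.println(ret);             // COMPLETED
        }
    }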
4 changes: 2 additions & 2 deletions node-net/test/test.mts
@@ -27,7 +27,7 @@ const chan_handler = ldk.ErroringMessageHandler.constructor_new().as_ChannelMessa
const cust_handler = ldk.IgnoringMessageHandler.constructor_new().as_CustomMessageHandler();
const onion_handler = ldk.IgnoringMessageHandler.constructor_new().as_OnionMessageHandler();

- const a_pm = ldk.PeerManager.constructor_new(chan_handler, routing_handler, onion_handler, node_a_secret, 0xdeadbeefn, rng_seed, logger_a, cust_handler);
+ const a_pm = ldk.PeerManager.constructor_new(chan_handler, routing_handler, onion_handler, node_a_secret, 0xdeadbeef, rng_seed, logger_a, cust_handler);
const a_net_handler = new node_net.NodeLDKNet(a_pm);
var port = 10000;
for (; port < 11000; port++) {
@@ -38,7 +38,7 @@ for (; port < 11000; port++) {
} catch(_) {}
}

- const b_pm = ldk.PeerManager.constructor_new(chan_handler, routing_handler, onion_handler, node_b_secret, 0xdeadbeefn, rng_seed, logger_b, cust_handler);
+ const b_pm = ldk.PeerManager.constructor_new(chan_handler, routing_handler, onion_handler, node_b_secret, 0xdeadbeef, rng_seed, logger_b, cust_handler);
const b_net_handler = new node_net.NodeLDKNet(b_pm);
await b_net_handler.connect_peer("127.0.0.1", port, node_a_pk);

src/main/java/org/ldk/batteries/ChannelManagerConstructor.java
@@ -132,15 +132,15 @@ public ChannelManagerConstructor(byte[] channel_manager_serialized, byte[][] cha
graph_msg_handler.as_RoutingMessageHandler(),
ignoring_handler.as_OnionMessageHandler(),
((Result_SecretKeyNoneZ.Result_SecretKeyNoneZ_OK)node_secret).res,
- System.currentTimeMillis() / 1000,
+ (int)(System.currentTimeMillis() / 1000),
random_data, logger, ignoring_handler.as_CustomMessageHandler());
} else {
this.graph_msg_handler = null;
this.peer_manager = PeerManager.of(channel_manager.as_ChannelMessageHandler(),
ignoring_handler.as_RoutingMessageHandler(),
ignoring_handler.as_OnionMessageHandler(),
((Result_SecretKeyNoneZ.Result_SecretKeyNoneZ_OK)node_secret).res,
- System.currentTimeMillis() / 1000,
+ (int)(System.currentTimeMillis() / 1000),
random_data, logger, ignoring_handler.as_CustomMessageHandler());
}
NioPeerHandler nio_peer_handler = null;
@@ -184,15 +184,15 @@ public ChannelManagerConstructor(Network network, UserConfig config, byte[] curr
graph_msg_handler.as_RoutingMessageHandler(),
ignoring_handler.as_OnionMessageHandler(),
((Result_SecretKeyNoneZ.Result_SecretKeyNoneZ_OK)node_secret).res,
- System.currentTimeMillis() / 1000,
+ (int)(System.currentTimeMillis() / 1000),
random_data, logger, ignoring_handler.as_CustomMessageHandler());
} else {
this.graph_msg_handler = null;
this.peer_manager = PeerManager.of(channel_manager.as_ChannelMessageHandler(),
ignoring_handler.as_RoutingMessageHandler(),
ignoring_handler.as_OnionMessageHandler(),
((Result_SecretKeyNoneZ.Result_SecretKeyNoneZ_OK)node_secret).res,
- System.currentTimeMillis() / 1000,
+ (int)(System.currentTimeMillis() / 1000),
random_data, logger, ignoring_handler.as_CustomMessageHandler());
}
NioPeerHandler nio_peer_handler = null;
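The repeated (int) cast above, together with the 0xdeadbeefn-to-0xdeadbeef change in node-net/test/test.mts, appears to track an upstream signature change in this release: the current-time argument passed when constructing the PeerManager is now a 32-bit seconds value rather than a 64-bit one, so Java callers must narrow the long. A minimal standalone sketch of that narrowing (plain Java, no LDK types):

    public class TimestampNarrowingSketch {
        public static void main(String[] args) {
            long seconds = System.currentTimeMillis() / 1000;
            int narrowed = (int) seconds; // keeps the low 32 bits
            // The two values agree until epoch seconds exceed
            // Integer.MAX_VALUE in January 2038.
            System.out.println(seconds + " -> " + narrowed);
        }
    }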
73 changes: 0 additions & 73 deletions src/main/java/org/ldk/enums/ChannelMonitorUpdateErr.java

This file was deleted.

88 changes: 88 additions & 0 deletions src/main/java/org/ldk/enums/ChannelMonitorUpdateStatus.java
Original file line number Diff line number Diff line change
@@ -0,0 +1,88 @@
package org.ldk.enums;

/**
* An enum representing the status of a channel monitor update persistence.
*/
public enum ChannelMonitorUpdateStatus {
/**
* The update has been durably persisted and all copies of the relevant [`ChannelMonitor`]
* have been updated.
*
* This includes performing any `fsync()` calls required to ensure the update is guaranteed to
* be available on restart even if the application crashes.
*/
LDKChannelMonitorUpdateStatus_Completed,
/**
* Used to indicate a temporary failure (eg connection to a watchtower or remote backup of
* our state failed, but is expected to succeed at some point in the future).
*
* Such a failure will \"freeze\" a channel, preventing us from revoking old states or
* submitting new commitment transactions to the counterparty. Once the update(s) which failed
* have been successfully applied, a [`MonitorEvent::Completed`] can be used to restore the
* channel to an operational state.
*
* Note that a given [`ChannelManager`] will *never* re-generate a [`ChannelMonitorUpdate`].
* If you return this error you must ensure that it is written to disk safely before writing
* the latest [`ChannelManager`] state, or you should return [`PermanentFailure`] instead.
*
* Even when a channel has been \"frozen\", updates to the [`ChannelMonitor`] can continue to
* occur (e.g. if an inbound HTLC which we forwarded was claimed upstream, resulting in us
* attempting to claim it on this channel) and those updates must still be persisted.
*
* No updates to the channel will be made which could invalidate other [`ChannelMonitor`]s
* until a [`MonitorEvent::Completed`] is provided, even if you return no error on a later
* monitor update for the same channel.
*
* For deployments where a copy of ChannelMonitors and other local state are backed up in a
* remote location (with local copies persisted immediately), it is anticipated that all
* updates will return [`InProgress`] until the remote copies could be updated.
*
* [`PermanentFailure`]: ChannelMonitorUpdateStatus::PermanentFailure
* [`InProgress`]: ChannelMonitorUpdateStatus::InProgress
* [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
*/
LDKChannelMonitorUpdateStatus_InProgress,
/**
* Used to indicate no further channel monitor updates will be allowed (likely a disk failure
* or a remote copy of this [`ChannelMonitor`] is no longer reachable and thus not updatable).
*
* When this is returned, [`ChannelManager`] will force-close the channel but *not* broadcast
* our current commitment transaction. This avoids a dangerous case where a local disk failure
* (e.g. the Linux-default remounting of the disk as read-only) causes [`PermanentFailure`]s
* for all monitor updates. If we were to broadcast our latest commitment transaction and then
* restart, we could end up reading a previous [`ChannelMonitor`] and [`ChannelManager`],
* revoking our now-broadcasted state before seeing it confirm and losing all our funds.
*
* Note that this is somewhat of a tradeoff - if the disk is really gone and we may have lost
* the data permanently, we really should broadcast immediately. If the data can be recovered
* with manual intervention, we'd rather close the channel, rejecting future updates to it,
* and broadcast the latest state only if we have HTLCs to claim which are timing out (which
* we do as long as blocks are connected).
*
* In order to broadcast the latest local commitment transaction, you'll need to call
* [`ChannelMonitor::get_latest_holder_commitment_txn`] and broadcast the resulting
* transactions once you've safely ensured no further channel updates can be generated by your
* [`ChannelManager`].
*
* Note that at least one final [`ChannelMonitorUpdate`] may still be provided, which must
* still be processed by a running [`ChannelMonitor`]. This final update will mark the
* [`ChannelMonitor`] as finalized, ensuring no further updates (e.g. revocation of the latest
* commitment transaction) are allowed.
*
* Note that even if you return a [`PermanentFailure`] due to unavailability of secondary
* [`ChannelMonitor`] copies, you should still make an attempt to store the update where
* possible to ensure you can claim HTLC outputs on the latest commitment transaction
* broadcasted later.
*
* In case of distributed watchtowers deployment, the new version must be written to disk, as
* state may have been stored but rejected due to a block forcing a commitment broadcast. This
* storage is used to claim outputs of rejected state confirmed onchain by another watchtower,
* lagging behind on block processing.
*
* [`PermanentFailure`]: ChannelMonitorUpdateStatus::PermanentFailure
* [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
*/
LDKChannelMonitorUpdateStatus_PermanentFailure,
; static native void init();
static { init(); }
}
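The doc comments in the new enum describe a persistence contract between the application and LDK. As a hedged sketch of how an application-level persister might map storage outcomes onto the three variants: the persistMonitor method, its byte[] argument, the exception handling, and the writeDurably helper below are all hypothetical simplifications, not the real generated Persist trait, and running this assumes the LDK native library is loadable (the enum's static initializer calls into it).

    import org.ldk.enums.ChannelMonitorUpdateStatus;

    public class PersistStatusSketch {
        ChannelMonitorUpdateStatus persistMonitor(byte[] monitorBytes) {
            try {
                writeDurably(monitorBytes); // must include any required fsync()
                return ChannelMonitorUpdateStatus.LDKChannelMonitorUpdateStatus_Completed;
            } catch (java.io.IOException transientFailure) {
                // Temporary (e.g. remote backup unreachable): the channel is
                // frozen until a MonitorEvent::Completed restores it.
                return ChannelMonitorUpdateStatus.LDKChannelMonitorUpdateStatus_InProgress;
            } catch (RuntimeException unrecoverable) {
                // Storage is gone: the channel is force-closed without
                // broadcasting the current commitment transaction.
                return ChannelMonitorUpdateStatus.LDKChannelMonitorUpdateStatus_PermanentFailure;
            }
        }

        private void writeDurably(byte[] bytes) throws java.io.IOException {
            // Placeholder for the application's durable-write logic.
            throw new java.io.IOException("storage unavailable");
        }
    }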