Struct necsim_impls_no_std::cogs::emigration_exit::never::NeverEmigrationExit
pub struct NeverEmigrationExit(/* private fields */);
Trait Implementations
impl Backup for NeverEmigrationExit
unsafe fn backup_unchecked(&self) -> Self
fn backup(&self) -> BackedUp<Self>
impl Debug for NeverEmigrationExit
impl Default for NeverEmigrationExit
fn default() -> NeverEmigrationExit
Returns the “default value” for a type.
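Since the struct's fields are private, a typical way to construct it from outside the crate is through this Default implementation. A minimal construction sketch, assuming the crate is available as a dependency:

use necsim_impls_no_std::cogs::emigration_exit::never::NeverEmigrationExit;

fn main() {
    // Build the emigration exit through its `Default` impl; there is nothing
    // to configure, so this is the whole setup.
    let never_exit = NeverEmigrationExit::default();
    let _ = never_exit;
}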
impl<M: MathsCore, H: Habitat<M>, G: RngCore<M>, S: LineageStore<M, H>> EmigrationExit<M, H, G, S> for NeverEmigrationExit
fn optionally_emigrate(
    &mut self,
    global_reference: GlobalLineageReference,
    dispersal_origin: IndexedLocation,
    dispersal_target: Location,
    prior_time: NonNegativeF64,
    event_time: PositiveF64,
    simulation: &mut PartialSimulation<M, H, G, S>,
    rng: &mut G
) -> Option<(GlobalLineageReference, IndexedLocation, Location, NonNegativeF64, PositiveF64)>
Contracts
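The contract details are elided above. Purely as an illustration of the expected behaviour, the type's name suggests that a "never" emigration exit keeps every lineage in the local partition, i.e. it returns all of its inputs unchanged instead of handing the event off. The sketch below uses hypothetical, simplified stand-in types rather than the crate's real cogs, only to show that pass-through shape:

// Hypothetical stand-ins for the crate's reference/location/time types.
struct Reference(u64);
struct IndexedLocation { x: u32, y: u32, index: u32 }
struct Location { x: u32, y: u32 }

struct NeverExitSketch;

impl NeverExitSketch {
    // A never-emigrating exit returns `Some(..)` with all inputs unchanged,
    // signalling that the event stays in the local simulation.
    fn optionally_emigrate(
        &mut self,
        reference: Reference,
        origin: IndexedLocation,
        target: Location,
        prior_time: f64,
        event_time: f64,
    ) -> Option<(Reference, IndexedLocation, Location, f64, f64)> {
        Some((reference, origin, target, prior_time, event_time))
    }
}

fn main() {
    let mut exit = NeverExitSketch;
    let kept = exit.optionally_emigrate(
        Reference(0),
        IndexedLocation { x: 1, y: 2, index: 0 },
        Location { x: 3, y: 4 },
        0.0,
        1.0,
    );
    // The event is always kept locally.
    assert!(kept.is_some());
}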
impl RustToCuda for NeverEmigrationExit
type CudaRepresentation = NeverEmigrationExitCudaRepresentation
type CudaAllocation = NoCudaAlloc
impl RustToCudaAsync for NeverEmigrationExit
type CudaAllocationAsync = NoCudaAlloc
Auto Trait Implementations
impl Freeze for NeverEmigrationExit
impl RefUnwindSafe for NeverEmigrationExit
impl Send for NeverEmigrationExit
impl Sync for NeverEmigrationExit
impl Unpin for NeverEmigrationExit
impl UnwindSafe for NeverEmigrationExit
Blanket Implementations
impl<T> BorrowMut<T> for T
where
    T: ?Sized,
fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value.
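This blanket impl comes from core::borrow and is not specific to NeverEmigrationExit; a minimal illustration with a plain integer:

use core::borrow::BorrowMut;

fn main() {
    // The blanket impl lets any owned value hand out a mutable reference to
    // itself through the generic `borrow_mut` entry point.
    let mut value: u32 = 7;
    let r: &mut u32 = value.borrow_mut();
    *r += 1;
    assert_eq!(value, 8);
}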
impl<T> ExtractDiscriminant for T
type Discriminant = <T as ExtractDiscriminantSpec<<T as DiscriminantKind>::Discriminant>>::Ty
The type of the discriminant, which must satisfy the trait bounds required by core::mem::Discriminant.
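core::mem::Discriminant is standard-library machinery rather than part of this crate; for background, discriminant values can be obtained and compared as follows (an illustrative example unrelated to NeverEmigrationExit):

use core::mem::{discriminant, Discriminant};

#[allow(dead_code)]
enum Event { Speciation, Dispersal { distance: u32 } }

fn main() {
    // `discriminant` erases the payload and records only which variant a value is.
    let a: Discriminant<Event> = discriminant(&Event::Dispersal { distance: 3 });
    let b = discriminant(&Event::Dispersal { distance: 7 });
    assert_eq!(a, b); // same variant, different payloads
}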
impl<T> LendToCuda for T
where
    T: RustToCuda,
fn lend_to_cuda<O, E, F>(&self, inner: F) -> Result<O, E>
where
    E: From<CudaError>,
    F: FnOnce(HostAndDeviceConstRef<'_, DeviceAccessible<<T as RustToCuda>::CudaRepresentation>>) -> Result<O, E>,
    T: Sync,
Lends an immutable borrow of &self to CUDA.
fn lend_to_cuda_mut<O, E, F>(&mut self, inner: F) -> Result<O, E>
where
    E: From<CudaError>,
    F: FnOnce(HostAndDeviceMutRef<'_, DeviceAccessible<<T as RustToCuda>::CudaRepresentation>>) -> Result<O, E>,
    T: Sync + SafeMutableAliasing,
fn move_to_cuda<O, E, F>(self, inner: F) -> Result<O, E>
where
    E: From<CudaError>,
    F: FnOnce(HostAndDeviceOwned<'_, DeviceAccessible<<T as RustToCuda>::CudaRepresentation>>) -> Result<O, E>,
    T: Send + RustToCuda,
    <T as RustToCuda>::CudaRepresentation: StackOnly,
    <T as RustToCuda>::CudaAllocation: EmptyCudaAlloc,
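A hedged usage sketch of the lend_to_cuda method shown above: it assumes a host program that has already initialised a CUDA context via the rust-cuda support crate, and it omits the imports of NeverEmigrationExit, the LendToCuda trait and CudaError, whose exact paths depend on the crate versions in use. The names in the closure come from the signature above; nothing here is specific to how necsim itself drives the GPU.

let exit = NeverEmigrationExit::default();
let lent: Result<(), CudaError> = exit.lend_to_cuda(|device_ref| {
    // `device_ref` is the HostAndDeviceConstRef to the CUDA representation of
    // the exit; a real caller would forward it to a kernel launch here.
    let _ = device_ref;
    Ok(())
});
assert!(lent.is_ok());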
impl<T> LendToCudaAsync for T
where
    T: RustToCudaAsync,
fn lend_to_cuda_async<'stream, O, E, F>(
    &self,
    stream: Stream<'stream>,
    inner: F
) -> Result<O, E>
where
    E: From<CudaError>,
    F: FnOnce(Async<'_, 'stream, HostAndDeviceConstRef<'_, DeviceAccessible<<T as RustToCuda>::CudaRepresentation>>>) -> Result<O, E>,
    T: Sync,
Lends an immutable copy of &self to CUDA.
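Similarly hedged, a sketch of the asynchronous variant: stream is assumed to be a Stream<'_> obtained elsewhere from an already initialised CUDA context, and import paths again depend on the rust-cuda version in use.

let exit = NeverEmigrationExit::default();
let lent: Result<(), CudaError> = exit.lend_to_cuda_async(stream, |async_ref| {
    // `async_ref` wraps the device-side const reference; it only becomes
    // usable once the work previously queued on `stream` has completed.
    let _ = async_ref;
    Ok(())
});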