https://github.com/frePPLe/frepple/tree/master
# Python-style walkthrough of frePPLe's C++ OperatorDelete solver.
# Names such as Demand, Buffer, Resource, OperationPlan, ROUNDING_ERROR and the
# Command* classes refer to objects of the frePPLe planning engine.
class OperatorDelete:
    def __init__(self, **kwds):
        # Initialize the solver state
        self.buffersToScan = []   # stack of buffers that still need to be scanned
        self.cmds = None          # optional command manager for undoable deletes
        self.loglevel = 0         # verbosity of the solver
        # Extra keyword arguments become attributes of the solver
        for key, value in kwds.items():
            setattr(self, key, value)

    @classmethod
    def initialize(cls):
        # Register the metadata and the Python class (a no-op in this sketch)
        pass

    @classmethod
    def create(cls, args, kwds):
        try:
            # Create the solver and pass on the extra keyword arguments
            return cls(**(kwds or {}))
        except Exception as e:
            # Report the failure instead of silently swallowing it
            print(f"Error creating OperatorDelete: {e}")
            return None
    def solve(self, v=None):
        # Dispatch to the right solve method based on the argument type
        try:
            # The C++ implementation frees the Python interpreter here
            # (Py_BEGIN_ALLOW_THREADS) so other threads can run while solving
            if v is None:
                # Delete all excess: clean up every buffer still on the stack
                while self.buffersToScan:
                    self.solve_buffer(self.buffersToScan.pop())
            elif isinstance(v, Demand):
                # Delete upstream of a single demand
                self.solve_demand(v)
            elif isinstance(v, Buffer):
                # Delete upstream of a single buffer
                self.solve_buffer(v)
            elif isinstance(v, Resource):
                # Delete upstream of a single resource
                self.solve_resource(v)
            elif isinstance(v, OperationPlan):
                # Delete an operation plan
                self.solve_operation_plan(v)
        except Exception as e:
            # Report the error
            print(f"Error in OperatorDelete.solve: {e}")
        finally:
            # The C++ implementation reclaims the Python interpreter here
            # (Py_END_ALLOW_THREADS)
            pass
    def solve_operation_plan(self, o):
        # Delete a single operation plan and propagate upstream
        if not o:
            return  # Null argument passed
        # Mark all buffers the operation plan consumes from.
        # The batching solver doesn't like that we push both consumers and
        # producers, but ideally we would pass True for both arguments.
        self.push_buffers(o, True, False)
        # Delete the operation plan itself, but only if it is still proposed
        if o.get_proposed():
            if self.cmds:
                # Delete through an undoable command
                self.cmds.add(CommandDeleteOperationPlan(o))
            else:
                # Immediate deletion (the C++ code deletes the object here)
                del o
        # Propagate to all upstream buffers
        while self.buffersToScan:
            curbuf = self.buffersToScan.pop()
            self.solve_buffer(curbuf)
    def solve_resource(self, r):
        # Scan a single resource for excess
        if self.loglevel > 0:
            print(f"Scanning {r} for excess")
        # Loop over all load plans on the resource
        for load_plan in r.get_load_plans():
            if load_plan.get_event_type() == 1:
                # Add all buffers into which the operation plan produces material
                self.push_buffers(load_plan.get_operation_plan(), False, True)
        # Process all buffers found, and their upstream colleagues
        while self.buffersToScan:
            cur_buf = self.buffersToScan.pop()
            self.solve_buffer(cur_buf)
    def solve_demand(self, d):
        # Scan a single demand for excess
        if self.loglevel > 1:
            print(f"Scanning {d} for excess")
        # Delete all proposed delivery operation plans of the demand
        while True:
            # Find a candidate delivery operation plan to delete
            candidate = None
            for op_plan in d.get_delivery():
                if op_plan.get_proposed():
                    candidate = op_plan
                    break
            if not candidate:
                break
            # Push the buffers in which the deletion creates excess inventory
            self.push_buffers(candidate, True, False)
            # Delete only the delivery, immediately or through a delete command
            if self.cmds:
                self.cmds.add(CommandDeleteOperationPlan(candidate))
            else:
                # Immediate deletion (the C++ code deletes the object here)
                del candidate
    def push_buffers(self, o, consuming, producing):
        # Push the buffers touched by an operation plan onto the scan stack.
        # Loop over all flow plans of the operation plan
        for flow_plan in o.get_flow_plans():
            # Skip flow plans we're not interested in:
            # consumers have a negative quantity, producers a positive one
            if not (
                (consuming and flow_plan.get_quantity() < 0)
                or (producing and flow_plan.get_quantity() > 0)
            ):
                continue
            # Add the buffer to the stack if it isn't there yet
            buf = flow_plan.get_buffer()
            if buf not in self.buffersToScan:
                self.buffersToScan.append(buf)
        # Recursive call for all sub-operation plans
        for sub_op_plan in o:
            self.push_buffers(sub_op_plan, consuming, producing)
    def solve_buffer(self, b):
        # Scan a single buffer for excess material
        if self.loglevel > 1:
            print(f"Scanning buffer {b}")
        # Get the list of flow plans for the buffer, sorted by date
        flow_plans = list(b.get_flow_plans())
        if not flow_plans:
            return  # There isn't a single flow plan in the buffer
        # The excess is the onhand above the minimum at the end of the horizon
        excess = flow_plans[-1].get_onhand() - flow_plans[-1].get_min()
        if excess <= ROUNDING_ERROR:
            return
        # Walk forward in time to find the earliest producers of the excess
        i = 0
        while excess > ROUNDING_ERROR and i < len(flow_plans):
            fiter = flow_plans[i]
            if fiter.get_quantity() <= 0:
                # Not a producer
                i += 1
                continue
            # Only regular flow plan events (event type 1) can be touched
            fp = fiter if fiter.get_event_type() == 1 else None
            if not fp or not fp.get_operation_plan().get_proposed() or \
                    fp.get_operation_plan().get_demand() or \
                    (fp.get_operation_plan().get_owner() and
                     fp.get_operation_plan().get_owner().get_demand()) or \
                    fp.get_flow().has_type(FlowTransferBatch):
                # It's locked or a delivery operation plan
                i += 1
                continue
            opplan = fp.get_operation_plan()
            # How much excess can this producer remove? Also respect the
            # excess in the other buffers the operation plan produces into.
            cur_excess = b.get_excess(fiter)
            for flow_plan in opplan.get_flow_plans():
                if flow_plan.get_quantity() < ROUNDING_ERROR or \
                        flow_plan.get_buffer() == b or \
                        not flow_plan.get_flow().get_quantity():
                    continue
                my_excess = (b.get_excess(flow_plan) -
                             flow_plan.get_flow().get_quantity_fixed()) * \
                    fp.get_flow().get_quantity() / \
                    flow_plan.get_flow().get_quantity()
                if 0.0 <= my_excess < cur_excess:
                    cur_excess = my_excess
            if cur_excess < ROUNDING_ERROR:
                i += 1
                continue
            # Advance past all flow plans of the same top-level operation plan,
            # since deleting or resizing the producer invalidates them
            while i < len(flow_plans) and \
                    flow_plans[i].get_event_type() == 1 and \
                    flow_plans[i].get_operation_plan().get_top_owner() == \
                    opplan.get_top_owner():
                i += 1
            # Compute the new size of the producer
            if cur_excess < fp.get_flow().get_quantity_fixed() + \
                    fp.get_operation().get_size_multiple() * \
                    fp.get_flow().get_quantity():
                # This excess is unavoidable
                continue
            elif cur_excess >= fiter.get_quantity() - ROUNDING_ERROR:
                # Completely delete the producer
                new_size_opplan = new_size_flowplan = 0.0
            else:
                # Resize the producer, keeping its start date constant.
                # The rounding to lot-size rules is left out of this walkthrough:
                # conceptually the producer shrinks by the removable excess.
                new_size_opplan = new_size_flowplan = fiter.get_quantity() - cur_excess
            if new_size_flowplan < ROUNDING_ERROR:
                # The complete operation plan is excess
                excess -= fiter.get_quantity()
                # Add upstream buffers to the stack
                self.push_buffers(opplan, True, False)
                # Delete the operation plan, immediately or through a command
                if self.cmds:
                    self.cmds.add(CommandDeleteOperationPlan(opplan))
                else:
                    # Immediate deletion (the C++ code deletes the object here)
                    del opplan
            else:
                # Reduce the operation plan
                # Add upstream buffers to the stack
                self.push_buffers(opplan, True, False)
                excess -= fiter.get_quantity() - new_size_flowplan
                # Resize the operation plan, immediately or through a command
                if self.cmds:
                    self.cmds.add(CommandMoveOperationPlan(
                        opplan, Date.infinite_past,
                        opplan.get_end(), new_size_opplan))
                else:
                    # Set the new size for the operation plan
                    opplan.set_quantity(new_size_opplan)
    def solve_python(self, *args):
        # Python entry point: forward the optional argument to solve()
        return self.solve(args[0] if args else None)
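The class above is a line-by-line Python transliteration of frePPLe's C++ OperatorDelete solver, so it leans on engine objects (Demand, Buffer, Resource, OperationPlan, the Command* classes) that are not defined in this post. To make the core idea concrete, here is a self-contained toy sketch of the propagation pattern: deleting a proposed producer pushes the buffers it consumed from onto a stack, and the stack is drained buffer by buffer. All names here (MiniBuffer, MiniOperationPlan, delete_upstream) are simplified stand-ins, not frePPLe API.

from dataclasses import dataclass, field

@dataclass
class MiniBuffer:
    # Simplified stand-in for a frePPLe buffer
    name: str
    producers: list = field(default_factory=list)  # operation plans producing into it

@dataclass
class MiniOperationPlan:
    # Simplified stand-in for an operation plan: consumes from buffers, produces into one
    name: str
    consumes_from: list
    proposed: bool = True  # only proposed plans may be deleted

def delete_upstream(opplan):
    # Delete a proposed operation plan and propagate the deletion upstream
    buffers_to_scan = []

    def push_consumed_buffers(o):
        # Remember every buffer the deleted plan was consuming from
        for buf in o.consumes_from:
            if buf not in buffers_to_scan:
                buffers_to_scan.append(buf)

    if opplan.proposed:
        print(f"deleting {opplan.name}")
        push_consumed_buffers(opplan)

    # Drain the stack: each consumed buffer may now hold excess, so its
    # proposed producers are deleted too, pushing their own upstream buffers
    while buffers_to_scan:
        buf = buffers_to_scan.pop()
        print(f"scanning {buf.name} for excess")
        for producer in list(buf.producers):
            if producer.proposed:
                print(f"deleting excess producer {producer.name}")
                push_consumed_buffers(producer)
                buf.producers.remove(producer)

# Toy supply chain: raw material -> component -> finished good
raw = MiniBuffer("raw material")
component = MiniBuffer("component")
make_component = MiniOperationPlan("make component", consumes_from=[raw])
component.producers.append(make_component)
make_fg = MiniOperationPlan("make finished good", consumes_from=[component])

delete_upstream(make_fg)

The real solver does the same walk, but instead of deleting every producer it computes how much of each producer is genuinely excess (solve_buffer) and either deletes or resizes it.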
- The key is understanding what a buffer represents and how it is manipulated.
- Each flow plan either consumes or produces material: consuming flow plans carry a negative quantity, producing flow plans a positive one (see the sketch below).
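As a quick, self-contained illustration of that sign convention (toy data, not frePPLe objects), this is essentially the filter that push_buffers applies:

# Toy illustration of the sign convention used by push_buffers:
# negative quantity = material consumed, positive quantity = material produced.
flow_plans = [
    {"buffer": "raw material", "quantity": -5.0},   # consumer
    {"buffer": "finished good", "quantity": +5.0},  # producer
    {"buffer": "packaging", "quantity": -1.0},      # consumer
]

def select_buffers(flow_plans, consuming, producing):
    # Keep only the buffers whose flow plans match the requested direction
    selected = []
    for fp in flow_plans:
        if (consuming and fp["quantity"] < 0) or (producing and fp["quantity"] > 0):
            if fp["buffer"] not in selected:
                selected.append(fp["buffer"])
    return selected

print(select_buffers(flow_plans, consuming=True, producing=False))  # ['raw material', 'packaging']
print(select_buffers(flow_plans, consuming=False, producing=True))  # ['finished good']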
- Identify Critical Paths: Start by identifying the critical paths in your scheduling process. Critical paths are sequences of tasks that have the least flexibility in terms of start times. These paths are essential to meeting project deadlines or other constraints.
- Determine Buffer Locations: Based on your critical paths, identify locations in your schedule where buffers can be introduced. These locations are typically at the end of a critical path or just before a critical constraint.
- Size the Buffer: The buffer's size depends on factors such as variability in task durations, uncertainty in resource availability, and the desired level of risk mitigation. It is often calculated with statistical analysis, such as the Critical Chain Project Management (CCPM) method or Monte Carlo simulations (a small sizing sketch follows this list).
- Set Buffer Policies: Define policies that determine when and how the buffer is consumed. Common buffer policies include:
  - Start Buffer: Added at the start of the project, it ensures that the project starts on time.
  - Resource Buffer: Used to manage resource constraints, it ensures that critical resources are available when needed.
  - Feeding Buffer: Placed before critical constraints or dependent tasks, it ensures that inputs are available as needed.
- Monitoring and Control: Continuously monitor the progress of your project. If tasks start to encroach on the buffer, take action to bring the project back on track. This might involve reallocating resources, addressing bottlenecks, or resequencing tasks.
- Buffer Management: The focus should be on protecting the project buffer. If it starts to be consumed, assess why this is happening and take corrective action.
- Project Review: After the project is completed, conduct a review to understand why the buffer was consumed. This can help in making improvements for future projects.
- Flexibility: Keep in mind that the buffer provides flexibility, but it is important to strike a balance. Too much buffer may result in inefficient resource allocation, while too little buffer might not provide enough protection.
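To make the sizing step concrete, here is a small Monte Carlo sketch with made-up task estimates: each task duration is sampled from a triangular distribution, and the buffer is sized as the gap between a high percentile of the simulated chain duration and the sum of the "most likely" estimates.

import random

# Hypothetical critical-chain tasks: (optimistic, most likely, pessimistic) durations in days
tasks = [(3, 5, 9), (2, 4, 7), (6, 8, 14), (1, 2, 4)]

def simulate_chain_duration(tasks):
    # One Monte Carlo trial: sample every task from a triangular distribution
    return sum(random.triangular(low, high, mode) for low, mode, high in tasks)

def size_buffer(tasks, trials=10_000, percentile=0.9):
    # Buffer = difference between a high percentile of the simulated total
    # duration and the sum of the "most likely" estimates
    durations = sorted(simulate_chain_duration(tasks) for _ in range(trials))
    aggressive_total = sum(mode for _, mode, _ in tasks)
    target = durations[int(percentile * trials) - 1]
    return max(0.0, target - aggressive_total)

print(f"Suggested project buffer: {size_buffer(tasks):.1f} days")

Raising the percentile buys more protection against overruns at the cost of a longer promised lead time.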
In the context of backward scheduling, the buffer helps to ensure that scheduled tasks can start as soon as possible while accounting for constraints and uncertainties. It is essentially a cushion that provides protection against delays without compromising the overall project timeline. The goal is to optimize the use of resources and time, minimize disruptions, and ensure that the project is completed on time.
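A minimal sketch of that backward pass, with made-up dates and durations: the latest allowed start is the due date minus the work content and the buffer.

from datetime import date, timedelta

# Hypothetical example: schedule backwards from the due date
due_date = date(2024, 6, 30)
task_durations = [timedelta(days=5), timedelta(days=8), timedelta(days=3)]
project_buffer = timedelta(days=4)

# Backward pass: the latest start is the due date minus all work and the buffer
latest_start = due_date - sum(task_durations, timedelta()) - project_buffer
print(f"Latest start date with buffer protection: {latest_start}")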