skip cache, excludes, and server-side DataTables
@@ -96,6 +96,7 @@ class RunRequest(BaseModel):
     #GENERATED ID within a run; binds all runners of a batch run together
     batch_id: Optional[str] = None
     cash: int = 100000
+    skip_cache: Optional[bool] = False


 class RunnerView(BaseModel):
@@ -255,6 +256,13 @@ class RunArchiveView(BaseModel):
     end_positions_avgp: float = 0
     metrics: Union[dict, str] = None

+#same but with pagination
+class RunArchiveViewPagination(BaseModel):
+    draw: int
+    recordsTotal: int
+    recordsFiltered: int
+    data: List[RunArchiveView]
+
 #class for storing stoploss history into ext_data
 class SLHistory(BaseModel):
     id: Optional[UUID]
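
The new model mirrors the DataTables server-side response contract: draw echoes the client's request counter (so out-of-order responses can be discarded), recordsTotal is the row count before filtering, and recordsFiltered is the count after filtering; this commit applies no search filter, so both carry the same value. A minimal sketch of the contract, assuming pydantic (ArchRow is an illustrative stand-in, not a class from the codebase):

    from typing import List
    from pydantic import BaseModel

    class ArchRow(BaseModel):            # stand-in for RunArchiveView
        runner_id: str
        profit: float = 0

    class Page(BaseModel):               # same shape as RunArchiveViewPagination
        draw: int                        # echo of the client request counter
        recordsTotal: int                # rows before filtering
        recordsFiltered: int             # rows after filtering (same here)
        data: List[ArchRow]

    page = Page(draw=1, recordsTotal=2, recordsFiltered=2,
                data=[ArchRow(runner_id="a"), ArchRow(runner_id="b", profit=5.0)])
    print(page.dict())
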
@@ -7,7 +7,7 @@ from alpaca.data.enums import DataFeed
 from alpaca.data.timeframe import TimeFrame
 from v2realbot.strategy.base import StrategyState
 from v2realbot.enums.enums import RecordType, StartBarAlign, Mode, Account, OrderSide
-from v2realbot.common.model import RunDay, StrategyInstance, Runner, RunRequest, RunArchive, RunArchiveView, RunArchiveDetail, RunArchiveChange, Bar, TradeEvent, TestList, Intervals, ConfigItem, InstantIndicator
+from v2realbot.common.model import RunDay, StrategyInstance, Runner, RunRequest, RunArchive, RunArchiveView, RunArchiveViewPagination, RunArchiveDetail, RunArchiveChange, Bar, TradeEvent, TestList, Intervals, ConfigItem, InstantIndicator
 from v2realbot.utils.utils import AttributeDict, zoneNY, zonePRG, safe_get, dict_replace_value, Store, parse_toml_string, json_serial, is_open_hours, send_to_telegram, concatenate_weekdays
 from v2realbot.utils.ilog import delete_logs
 from v2realbot.common.PrescribedTradeModel import Trade, TradeDirection, TradeStatus, TradeStoplossType
@@ -307,6 +307,12 @@ def capsule(target: object, db: object, inter_batch_params: dict = None):

         print("Strategy instance stopped. Update runners")
+        reason = None
+
+        if target.se.is_set():
+            print("STOP FLAG IS SET - cancel BATCH")
+            inter_batch_params["stop"] = True
+            reason = "STOP Signal received"

     except Exception as e:
         reason = "SHUTDOWN Exception:" + str(e) + format_exc()
         #raise RuntimeError('Exception in runner WARNING') from e
@@ -518,6 +524,12 @@ def batch_run_manager(id: UUID, runReq: RunRequest, rundays: list[RunDay]):
            print(f"Error in run #{cnt} from:{runReq.bt_from} to:{runReq.bt_to} -> {id_val}")
            break

+       if "stop" in inter_batch_params and inter_batch_params["stop"] is True:
+           #we have a stop signal - cancel the whole BATCH
+           print("STOP SIGNAL RECEIVED")
+           break
+

    print("Batch manager FINISHED")
    ##TBD write this into the batch header! to get an overview - from, to, profit, metrics
    batch_abs_profit = 0
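
For context, the stop handshake added in the two hunks above works as a pair: the runner capsule raises a flag in the shared inter_batch_params dict when its stop event fires, and batch_run_manager checks that flag between backtest days. A self-contained sketch of the same pattern with simplified names (capsule_like and batch_manager_like are illustrative, not the real functions):

    import threading

    def capsule_like(stop_event: threading.Event, inter_batch_params: dict):
        # runner side: translate the stop event into a shared flag
        if stop_event.is_set():
            inter_batch_params["stop"] = True

    def batch_manager_like(days: list, stop_event: threading.Event):
        inter_batch_params = {}
        for day in days:
            capsule_like(stop_event, inter_batch_params)  # one backtest day
            if inter_batch_params.get("stop") is True:
                print("STOP SIGNAL RECEIVED")
                break

    ev = threading.Event()
    ev.set()  # simulate a user-requested stop
    batch_manager_like(["2023-11-01", "2023-11-02"], ev)
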
@@ -963,6 +975,58 @@ def get_all_archived_runners() -> list[RunArchiveView]:
         pool.release_connection(conn)
     return 0, results

+#with pagination
+def get_all_archived_runners_p(start: int, length: int, draw: int) -> list[RunArchiveViewPagination]:
+    conn = pool.get_connection()
+    try:
+        conn.row_factory = Row
+        c = conn.cursor()
+
+        # Query to get the total count of records
+        total_count_query = "SELECT COUNT(*) FROM runner_header"
+        c.execute(total_count_query)
+        total_count = c.fetchone()[0]
+
+        # Query to get the paginated data
+        paginated_query = f"""
+            SELECT runner_id, strat_id, batch_id, symbol, name, note, started,
+                   stopped, mode, account, bt_from, bt_to, ilog_save, profit,
+                   trade_count, end_positions, end_positions_avgp, metrics
+            FROM runner_header
+            ORDER BY stopped DESC
+            LIMIT {length} OFFSET {start}
+        """
+        c.execute(paginated_query)
+        rows = c.fetchall()
+
+        results = [row_to_runarchiveview(row) for row in rows]
+
+    finally:
+        conn.row_factory = None
+        pool.release_connection(conn)
+
+    try:
+        obj = RunArchiveViewPagination(draw=draw, recordsTotal=total_count, recordsFiltered=total_count, data=results)
+        return 0, obj
+    except Exception as e:
+        return -2, str(e) + format_exc()
+
+
+    conn = pool.get_connection()
+    try:
+        conn.row_factory = Row
+        c = conn.cursor()
+        c.execute(f"SELECT runner_id, strat_id, batch_id, symbol, name, note, started, stopped, mode, account, bt_from, bt_to, ilog_save, profit, trade_count, end_positions, end_positions_avgp, metrics FROM runner_header")
+        rows = c.fetchall()
+        results = []
+        for row in rows:
+            results.append(row_to_runarchiveview(row))
+    finally:
+        conn.row_factory = None
+        pool.release_connection(conn)
+    return 0, results
+
+
 #DECOMMS
 # def get_all_archived_runners():
 #     conn = pool.get_connection()
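
One caveat in the hunk above: paginated_query splices length and start into the SQL with an f-string. FastAPI declares them as int on the endpoint, so injection is unlikely in practice, but binding them is the safer sqlite idiom. A hardened sketch under that assumption (fetch_page is a hypothetical helper, column list shortened):

    import sqlite3

    def fetch_page(conn: sqlite3.Connection, start: int, length: int):
        c = conn.cursor()
        c.execute("SELECT COUNT(*) FROM runner_header")
        total = c.fetchone()[0]
        # LIMIT/OFFSET as bound parameters: a non-integer value raises
        # instead of being spliced into the SQL text
        c.execute(
            "SELECT runner_id, profit FROM runner_header "
            "ORDER BY stopped DESC LIMIT ? OFFSET ?",
            (int(length), int(start)),
        )
        return total, c.fetchall()
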
@@ -24,7 +24,8 @@ class TradeAggregator:
                  align: StartBarAlign = StartBarAlign.ROUND,
                  mintick: int = 0,
                  exthours: bool = False,
-                 excludes: list = AGG_EXCLUDED_TRADES):
+                 excludes: list = AGG_EXCLUDED_TRADES,
+                 skip_cache: bool = False):
        """
        UPDATED VERSION - returns multiple records

@@ -44,6 +45,7 @@ class TradeAggregator:
        self.update_ltp = update_ltp
        self.exthours = exthours
        self.excludes = excludes
+       self.skip_cache = skip_cache

        if mintick >= resolution:
            print("Mintick must be smaller than resolution")
@@ -715,7 +717,7 @@ class TradeAggregator:
    #returns cached objects for given period
    def get_cache(self, date_from: datetime, date_to: datetime):
        file_path = self.populate_file_name(date_from, date_to)
-       if os.path.exists(file_path):
+       if self.skip_cache is False and os.path.exists(file_path):
            ##daily aggregated file exists
            with open (file_path, 'rb') as fp:
                cachedobject = dill.load(fp)
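
The changed condition means that with skip_cache on, the daily aggregated cache file is never consulted and trades are re-aggregated from source even when a cache file exists, which is useful after changing aggregator parameters such as excludes. A condensed restatement of the guard (load_cache is a hypothetical name; dill is the serializer the aggregator already uses):

    import os
    import dill

    def load_cache(file_path: str, skip_cache: bool = False):
        # same logic as the changed line: only consult the cache when
        # skip_cache is off; None tells the caller to aggregate afresh
        if skip_cache is False and os.path.exists(file_path):
            with open(file_path, "rb") as fp:
                return dill.load(fp)
        return None
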
@@ -770,8 +772,8 @@ class TradeAggregator2Queue(TradeAggregator):
    Child of TradeAggregator - sends items to given queue
    In the future others will be added - TradeAggToTxT etc.
    """
-   def __init__(self, symbol: str, queue: Queue, rectype: RecordType = RecordType.BAR, resolution: int = 5, minsize: int = 100, update_ltp: bool = False, align: StartBarAlign = StartBarAlign.ROUND, mintick: int = 0, exthours: bool = False, excludes: list = AGG_EXCLUDED_TRADES):
-       super().__init__(rectype=rectype, resolution=resolution, minsize=minsize, update_ltp=update_ltp, align=align, mintick=mintick, exthours=exthours, excludes=excludes)
+   def __init__(self, symbol: str, queue: Queue, rectype: RecordType = RecordType.BAR, resolution: int = 5, minsize: int = 100, update_ltp: bool = False, align: StartBarAlign = StartBarAlign.ROUND, mintick: int = 0, exthours: bool = False, excludes: list = AGG_EXCLUDED_TRADES, skip_cache: bool = False):
+       super().__init__(rectype=rectype, resolution=resolution, minsize=minsize, update_ltp=update_ltp, align=align, mintick=mintick, exthours=exthours, excludes=excludes, skip_cache=skip_cache)
        self.queue = queue
        self.symbol = symbol
        self.cached_object = Queue()

@@ -815,8 +817,8 @@ class TradeAggregator2List(TradeAggregator):
    """"
    stores records to the list
    """
-   def __init__(self, symbol: str, btdata: list, rectype: RecordType = RecordType.BAR, resolution: int = 5, minsize: int = 100, update_ltp: bool = False, align: StartBarAlign = StartBarAlign.ROUND, mintick: int = 0, exthours: bool = False, excludes: list = AGG_EXCLUDED_TRADES):
-       super().__init__(rectype=rectype, resolution=resolution, minsize=minsize, update_ltp=update_ltp, align=align, mintick=mintick, exthours=exthours, excludes=excludes)
+   def __init__(self, symbol: str, btdata: list, rectype: RecordType = RecordType.BAR, resolution: int = 5, minsize: int = 100, update_ltp: bool = False, align: StartBarAlign = StartBarAlign.ROUND, mintick: int = 0, exthours: bool = False, excludes: list = AGG_EXCLUDED_TRADES, skip_cache: bool = False):
+       super().__init__(rectype=rectype, resolution=resolution, minsize=minsize, update_ltp=update_ltp, align=align, mintick=mintick, exthours=exthours, excludes=excludes, skip_cache=skip_cache)
        self.btdata = btdata
        self.symbol = symbol
        self.cached_object = []
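
Both subclass hunks repeat the same mechanical change, and it matters: a keyword added to a base class with a default is invisible through a subclass until the subclass __init__ accepts it and forwards it to super().__init__. A minimal self-contained illustration of that forwarding pattern (generic names, not the real classes):

    from queue import Queue

    class Base:
        def __init__(self, resolution: int = 5, skip_cache: bool = False):
            self.resolution = resolution
            self.skip_cache = skip_cache

    class ToQueue(Base):
        # without the explicit parameter here, callers could never
        # override the base-class default through this subclass
        def __init__(self, queue: Queue, resolution: int = 5, skip_cache: bool = False):
            super().__init__(resolution=resolution, skip_cache=skip_cache)
            self.queue = queue

    agg = ToQueue(queue=Queue(), skip_cache=True)
    assert agg.skip_cache is True
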
@@ -11,7 +11,7 @@ import uvicorn
 from uuid import UUID
 import v2realbot.controller.services as cs
 from v2realbot.utils.ilog import get_log_window
-from v2realbot.common.model import StrategyInstance, RunnerView, RunRequest, Trade, RunArchive, RunArchiveView, RunArchiveDetail, Bar, RunArchiveChange, TestList, ConfigItem, InstantIndicator
+from v2realbot.common.model import StrategyInstance, RunnerView, RunRequest, Trade, RunArchive, RunArchiveView, RunArchiveViewPagination, RunArchiveDetail, Bar, RunArchiveChange, TestList, ConfigItem, InstantIndicator
 from fastapi import FastAPI, WebSocket, WebSocketDisconnect, Depends, HTTPException, status, WebSocketException, Cookie, Query
 from fastapi.responses import FileResponse, StreamingResponse
 from fastapi.staticfiles import StaticFiles

@@ -425,6 +425,16 @@ def _get_all_archived_runners() -> list[RunArchiveView]:
     else:
         raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f"No data found")

+#get all archived runners headers - just RunArchiveView - with pagination
+@app.get("/archived_runners_p/", dependencies=[Depends(api_key_auth)])
+def _get_all_archived_runners_p(start: int = 0, length: int = 10, draw: int = 1) -> RunArchiveViewPagination:
+    res, set = cs.get_all_archived_runners_p(start, length, draw)
+    if res == 0:
+        return set
+    else:
+        raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f"No data found")
+
+
 #get complete header data for specific archivedRunner = RunArchive
 @app.get("/archived_runners/{runner_id}", dependencies=[Depends(api_key_auth)])
 def _get_archived_runner_header_byID(runner_id: UUID) -> RunArchive:
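
In server-side mode DataTables sends start, length and draw as query parameters, which is exactly the signature the new endpoint declares. A usage sketch with requests, assuming a local instance (host, port and key are placeholders):

    import requests

    resp = requests.get(
        "http://localhost:8000/archived_runners_p/",  # assumed host/port
        params={"start": 0, "length": 10, "draw": 1},
        headers={"X-API-Key": "your-api-key"},        # placeholder key
    )
    resp.raise_for_status()
    page = resp.json()
    # server-side DataTables contract: draw echoed back, counts plus rows
    print(page["draw"], page["recordsTotal"], len(page["data"]))
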
@@ -30,6 +30,7 @@
     <!-- <script src="https://cdn.datatables.net/1.13.4/js/jquery.dataTables.min.js"></script> -->
     <script src="/static/js/libs/jquery.dataTables.min.js"></script>

+    <script src="https://cdn.datatables.net/rowgroup/1.0.2/js/dataTables.rowGroup.min.js"></script>
     <script src="/static/js/jquery.serializejson.js"></script>

     <!-- <script src="https://cdn.jsdelivr.net/npm/bootstrap@5.3.0-alpha3/dist/js/bootstrap.bundle.min.js" integrity="sha384-ENjdO4Dr2bkBIFxQpeoTz1HIcje39Wm4jDKdf19U8gI4ddQ3GYNS7NTKfAdVQSZe" crossorigin="anonymous"></script> -->
@@ -919,11 +919,16 @@ $("#delModalArchive").on('submit','#delFormArchive', function(event){
 var archiveRecords =
     $('#archiveTable').DataTable( {
         ajax: {
-            url: '/archived_runners/',
-            dataSrc: '',
+            url: '/archived_runners_p/',
+            // dataSrc: 'data',
             beforeSend: function (xhr) {
                 xhr.setRequestHeader('X-API-Key',
                 API_KEY); },
+            // data: function(d) {
+            //     d.start = d.start;
+            //     d.length = d.length;
+            //     d.draw = d.draw;
+            // },
             error: function(xhr, status, error) {
                 //var err = eval("(" + xhr.responseText + ")");
                 //window.alert(JSON.stringify(xhr));

@@ -950,7 +955,8 @@ var archiveRecords =
             {data: 'batch_id', visible: true},
         ],
         paging: false,
-        processing: false,
+        processing: true,
+        serverSide: true,
         columnDefs: [{
             targets: [0,1,17],
             render: function ( data, type, row ) {

@@ -1104,8 +1110,9 @@ var archiveRecords =
         ],
         order: [[6, 'desc']],
         select: {
             info: true,
             style: 'multi',
-            selector: 'td'
+            selector: 'tbody > tr:not(.group-header) td'
         },
         paging: true,
         // lengthChange: false,
@@ -1116,8 +1123,104 @@ var archiveRecords =
         //     $(row).addClass('highlight');
         // }
         //}
-    } );
+        // Add row grouping based on 'batch_id'
+        rowGroup: {
+            dataSrc: 'batch_id',
+            startRender: function (rows, group) {
+                var groupId = group ? group : 'no-batch-id';
+
+                // Initialize variables for the group
+                var itemCount = 0;
+                var firstNote = '';
+                var profit = '';
+
+                // Process each item only once
+                archiveRecords.rows({ search: 'applied' }).every(function (rowIdx, tableLoop, rowLoop) {
+                    var data = this.data();
+
+                    if ((group && data.batch_id === group)) {
+                        itemCount++;
+                        if (itemCount === 1) {
+                            firstNote = data.note ? data.note.substring(0, 14) : '';
+                            try {
+                                profit = data.metrics.profit.batch_sum_profit;
+                            } catch (e) {
+                                profit = 'N/A';
+                            }
+                        }
+                    }
+                });
+
+                // Construct the group header
+                var groupHeaderContent = '<strong>' + (group ? 'Batch ID: ' + group : 'No Batch') + '</strong>';
+                groupHeaderContent += (group ? ' <span>(' + itemCount + ')</span>' : '');
+                if (firstNote) {
+                    groupHeaderContent += ' ' + firstNote;
+                }
+                if (profit) {
+                    groupHeaderContent += ' - <span class="profit-info">Profit: ' + profit + '</span>';
+                }
+
+                return $('<tr/>')
+                    .append('<td colspan="18">' + groupHeaderContent + '</td>')
+                    .attr('data-name', groupId)
+                    .addClass('group-header collapsed');
+            }
+        },
+        drawCallback: function (settings) {
+            var api = this.api();
+            var rows = api.rows({ page: 'current' }).nodes();
+
+            // Iterate over all rows in the current page
+            api.column(17, { page: 'current' }).data().each(function (group, i) {
+                var groupName = group ? group : 'no-batch-id';
+                var stateKey = 'dt-group-state-' + groupName;
+                var state = localStorage.getItem(stateKey);
+
+                if (state === 'collapsed') {
+                    // Hide all rows in the collapsed group
+                    $(rows).eq(i).hide();
+                    $('tr[data-name="' + groupName + '"]').addClass('collapsed');
+                } else {
+                    // Show all rows in the expanded group
+                    $(rows).eq(i).show();
+                    $('tr[data-name="' + groupName + '"]').removeClass('collapsed');
+                }
+            });
+        }
+    });
+
+// Function to generate a unique key for localStorage based on batch_id
+function generateStorageKey(batchId) {
+    return 'dt-group-state-' + batchId;
+}
+
+// Expand/Collapse functionality
+$('#archiveTable tbody').on('click', 'tr.group-header', function () {
+    var name = $(this).data('name');
+    var collapsed = $(this).hasClass('collapsed');
+    $(this).toggleClass('collapsed');
+
+    archiveRecords.rows().every(function () {
+        var rowGroup = this.data().batch_id ? this.data().batch_id : 'no-batch-id';
+        if (rowGroup === name) {
+            if (collapsed) {
+                this.node().style.display = '';
+            } else {
+                this.node().style.display = 'none';
+            }
+        }
+    });

+    // Save the state
+    if (collapsed) {
+        localStorage.setItem(generateStorageKey(name), 'expanded');
+    } else {
+        localStorage.setItem(generateStorageKey(name), 'collapsed');
+    }
+});
 //WIP buttons to hide datatable columns
 // document.querySelectorAll('a.toggle-vis').forEach((el) => {
 //     el.addEventListener('click', function (e) {
v2realbot/static/js/archivetables_firstbatchasheader.js (new file, 1177 lines)
File diff suppressed because it is too large.
@@ -173,6 +173,34 @@ table.dataTable thead>tr>th.sorting_asc:before, table.dataTable thead>tr>th.sort
     transform: translate(-50%, -50%);
 }

+.group-header {
+    /* cursor: pointer; */
+    background-color: #f2f2f2; /* Light gray background */
+    color: #b2b2b2; /* Dark text for contrast */
+    font-weight: bold;
+}
+
+.group-header .profit-info {
+    color: #3e999e; /* Highlight profit info */
+    font-weight: bold;
+}
+
+.group-header strong {
+    color: #3e999e; /* Distinct color for Batch ID */
+}
+
+.group-header.collapsed::after {
+    content: '▼'; /* Downward arrow for collapsed groups */
+    float: right;
+}
+
+.group-header::after {
+    content: '▲'; /* Upward arrow for expanded groups */
+    float: right;
+}
+
+
 /* .btn-outline-success {
     --bs-btn-color: #316164;
     --bs-btn-border-color: #247e85;
@@ -514,9 +514,12 @@ class Strategy:
         self.bt = None
         self.btdata = None
         self.dataloader = None
-        self.state = None
         self.rtqueue = None
         self._streams = None
         self.q1 = None
         self.q2 = None
+        self.state.release()
+        self.state = None


@@ -778,6 +781,13 @@ class StrategyState:
         self.mode = None
         self.wait_for_fill = None

+    def release(self):
+        #release large variables
+        self.bars = None
+        self.trades = None
+        self.indicators = None
+        self.iter_log_list = None
+
     def ilog(self, e: str = None, msg: str = None, lvl: int = 1, **kwargs):
         if lvl < ILOG_SAVE_LEVEL_FROM:
             return
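
The release pattern in the two hunks above drops references to the per-run containers (bars, trades, indicators, iteration logs) once a runner finishes, so CPython's reference counting can reclaim the memory even if the Strategy object itself lingers. A stripped-down analogue (StateLike is a hypothetical stand-in for StrategyState):

    class StateLike:
        def __init__(self):
            self.bars = [0.0] * 1_000_000
            self.trades = []
            self.indicators = {}
            self.iter_log_list = []

        def release(self):
            # dropping the references is enough: with no other holders,
            # refcounting frees the containers immediately
            self.bars = None
            self.trades = None
            self.indicators = None
            self.iter_log_list = None

    s = StateLike()
    s.release()
    assert s.bars is None
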