@@ -16,17 +16,19 @@ module Development.IDE.Graph.Internal.Scheduler
    , reportTotalCount
    , isDirty
    , isRunDepChangedOne
+   , doAction
    ) where

import Control.Concurrent.STM (STM, atomically,
                               flushTQueue, readTQueue,
                               readTVar, writeTQueue,
                               writeTVar)
-import Control.Monad (filterM, foldM, forM_,
-                      void)
-import Data.Maybe (fromMaybe)
+import Control.Monad (filterM, foldM, forM,
+                      forM_, void)
+import Data.Maybe (catMaybes, fromMaybe)
import qualified StmContainers.Map as SMap

+import Control.Concurrent.Async (mapConcurrently)
import qualified Control.Concurrent.STM.TPQueue as TPQ
import Data.Foldable (Foldable (..))
import Development.IDE.Graph.Internal.Key
@@ -39,6 +41,8 @@ import Development.IDE.Graph.Internal.Types (Database (..),
                                              Status (..), dbNotLocked,
                                              lookupDatabaseRuntimeDepRootCounter)
import qualified StmContainers.Set as SSet
+import System.Time.Extra (Seconds, duration)
+

type StatusCache = KeyMap (Maybe KeyDetails)

@@ -75,9 +79,24 @@ isRunDepChangedOne me dep =
-- so when a dep is cleaned, we can decrement the pending count, and when it reaches zero, we can move it to databaseRunningReady
prepareToRunKey :: Key -> Database -> STM ()
prepareToRunKey key db =
-    void $ prepareToRunKeyCached db mempty key
+    void $ prepareToRunKeyCached db mempty key >>= doAction db . fst
+
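+-- | What should happen to a key after preparing it: nothing, wait for a
+-- number of still-dirty dependencies, or run it right away.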
+data PrepareAction a k = PrepareActionNothing | PrepareActionPending a Int | PrepareActionReady a
+
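+-- | Apply the scheduler bookkeeping for a 'PrepareAction': a pending key is
+-- recorded in 'schedulerRunningPending' together with its pending count; a
+-- ready key is removed from it and returned so the caller can enqueue it.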
+doAction :: Database -> PrepareAction (Key, RunMode, Maybe Result) Key -> STM (Maybe (Key, RunMode, Maybe Result))
+doAction _ PrepareActionNothing = pure Nothing
+doAction (Database{..}) (PrepareActionReady (key, runMode, mRes)) = do
+    let SchedulerState{..} = databaseScheduler
+    SMap.delete key schedulerRunningPending
+    pure $ Just (key, runMode, mRes)
+
+doAction (Database{..}) (PrepareActionPending (key, runMode, mRes) pendingCount) = do
+    let SchedulerState{..} = databaseScheduler
+    SMap.insert (pendingCount, runMode, mRes) key schedulerRunningPending
+    pure Nothing

-prepareToRunKeyCached :: Database -> StatusCache -> Key -> STM StatusCache
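+-- | Decide how a dirty key should be scheduled.  Instead of writing to the
+-- pending/ready structures directly, it returns a 'PrepareAction' that the
+-- caller applies with 'doAction'; the 'StatusCache' avoids repeated status
+-- lookups within one batch.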
+prepareToRunKeyCached :: Database -> StatusCache -> Key -> STM (PrepareAction (Key, RunMode, Maybe Result) Key, StatusCache)
prepareToRunKeyCached db@Database{..} cache0 key = do
    let SchedulerState{..} = databaseScheduler
    (status, cache1) <- lookupStatusCache db key cache0
@@ -106,17 +125,17 @@ prepareToRunKeyCached db@Database {..} cache0 key = do
            -- since it is not upsweep responsibility
            _ -> cleanHook key db >> pure (cache1, Nothing)
    case res of
-        Nothing -> pure cache2
+        Nothing -> pure (PrepareActionNothing, cache2)
        Just (pendingCount, runMode, mRes) ->
            if pendingCount == 0
            then do
-                prio <- lookupDatabaseRuntimeDepRootCounter key db
-                TPQ.writeTPQueue schedulerRunningReady prio (key, runMode, mRes)
-                SMap.delete key schedulerRunningPending
-                pure cache2
+                pure (PrepareActionReady (key, runMode, mRes), cache2)
            else do
-                SMap.insert (pendingCount, runMode, mRes) key schedulerRunningPending
-                pure cache2
+                pure (PrepareActionPending (key, runMode, mRes) pendingCount, cache2)
  where
    collectDep r (cacheAcc, cleanAcc, modeAcc) dep = do
        (depStatus, cacheNext) <- lookupStatusCache db dep cacheAcc
@@ -129,14 +148,39 @@ prepareToRunKeyCached db@Database {..} cache0 key = do


-- take out all databaseDirtyTargets and prepare them to run
-prepareToRunKeys :: Database -> IO [Key]
-prepareToRunKeys db =
-    atomically $ do
-        dbNotLocked db
-        let SchedulerState{..} = databaseScheduler db
-        dirtys <- flushTQueue schedulerUpsweepQueue
-        _ <- foldM (prepareToRunKeyCached db) mempty dirtys
-        return dirtys
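+-- | Drain the upsweep queue and prepare every dirty key to run: the keys are
+-- split into chunks that are prepared concurrently, the resulting
+-- 'PrepareAction's are applied, and the keys that are ready get pushed onto
+-- the ready queue.  Returns the time spent in each phase along with the
+-- drained dirty keys.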
+prepareToRunKeys :: Database -> IO ((Seconds, Seconds, Seconds), [Key])
+prepareToRunKeys db = do
+    let SchedulerState{..} = databaseScheduler db
+    dirties <- atomically $
+        -- dbNotLocked db
+        flushTQueue schedulerUpsweepQueue
+    -- let dirtiesList = chunksOf 1 dirties
+    let dirtiesList = toNChunks 8 dirties
+    -- We still need to work out a good batch size for the dirty keys;
+    -- it could be made dynamic, based on the total number of dirty keys.
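+    -- phase 1: concurrently compute a PrepareAction for every key in each chunk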
+    (t1, toRunsList) <- duration $ flip mapConcurrently dirtiesList $ \ks -> do
+        atomically $ fst <$> foldM (\(result, cache) k -> do
+            (nresult, ncache) <- prepareToRunKeyCached db cache k
+            return (nresult : result, ncache)
+            ) ([], mempty) ks
+    -- (t1, toRunsList) <- duration $ forM dirtiesList $ \ks -> do
+    --     atomically $ fst <$> foldM (\(result, cache) k -> do
+    --         (nresult, ncache) <- prepareToRunKeyCached db cache k
+    --         return (nresult : result, ncache)
+    --         ) ([], mempty) ks
+    -- dbNotLocked db
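+    -- phase 2: apply the actions; pending keys are recorded, ready keys are returned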
+    (t2, res) <- duration $ forM toRunsList $ \toRuns -> forM toRuns $ \k -> do
+        -- we could potentially miss a decreasePendingCount action here
+        -- if this step does not run quickly enough
+        atomically $ doAction db k
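+    -- phase 3: enqueue every ready key; note that all of them currently get priority 0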
+    (t3, ()) <- duration $ atomically $ TPQ.fromList schedulerRunningReady [(0, (k, a, b)) | r <- res, (k, a, b) <- catMaybes r]
+    return ((t1, t2, t3), dirties)
+  where
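+    -- split the dirty keys into at most n batches of roughly equal size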
+    toNChunks n xs = go n xs
+      where
+        go _ [] = []
+        go 1 ys = [ys]
+        go k ys = let (h, t) = splitAt ((length ys + k - 1) `div` k) ys
+                  in h : go (k - 1) t

prepareToRunKeysRealTime :: Database -> IO ()
prepareToRunKeysRealTime db@Database{..} = do