update: fix spurious unclean status bug shown by previous commit...
Valentin Gatien-Baron
r42722:d29db0a0 default
@@ -1472,10 +1472,10 @@ def batchget(repo, mctx, wctx, wantfiled
 
     Yields arbitrarily many (False, tuple) for progress updates, followed by
     exactly one (True, filedata). When wantfiledata is false, filedata is an
-    empty list. When wantfiledata is true, filedata[i] is a triple (mode, size,
-    mtime) of the file written for action[i].
+    empty dict. When wantfiledata is true, filedata[f] is a triple (mode, size,
+    mtime) of the file f written for each action.
     """
-    filedata = []
+    filedata = {}
     verbose = repo.ui.verbose
     fctx = mctx.filectx
     ui = repo.ui
@@ -1509,7 +1509,7 @@ def batchget(repo, mctx, wctx, wantfiled
             s = wfctx.lstat()
             mode = s.st_mode
             mtime = s[stat.ST_MTIME]
-            filedata.append((mode, size, mtime)) # for dirstate.normal
+            filedata[f] = ((mode, size, mtime)) # for dirstate.normal
         if i == 100:
             yield False, (i, f)
             i = 0
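
The crux of the fix is visible in these two hunks: batchget used to append
stat triples positionally, and the caller later paired filedata[i] with
action i. Once the previous commit split the work across worker processes,
the reassembled list no longer had to line up with the original action
order, so a file could be recorded in the dirstate with another file's
(mode, size, mtime) and show up as spuriously modified. Keying by filename
makes reassembly order-independent. A minimal sketch of the generator
contract described in the docstring (illustrative code, not Mercurial's
actual API):

    import os
    import stat
    import tempfile

    def batchget_sketch(paths):
        # Yield (False, progress) any number of times, then exactly one
        # (True, filedata), where filedata maps filename -> (mode, size,
        # mtime), mirroring the docstring above.
        filedata = {}
        for i, f in enumerate(paths):
            s = os.lstat(f)
            filedata[f] = (s.st_mode, s.st_size, s[stat.ST_MTIME])
            yield False, (i, f)  # progress update
        yield True, filedata     # final value; keys make ordering irrelevant

    with tempfile.TemporaryDirectory() as d:
        paths = [os.path.join(d, n) for n in ('a', 'b')]
        for p in paths:
            open(p, 'w').close()
        for final, res in batchget_sketch(paths):
            if final:
                print(sorted(res))  # the two temporary file paths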
@@ -1670,7 +1670,7 @@ def applyupdates(repo, actions, wctx, mc
                              actions[ACTION_GET],
                              threadsafe=threadsafe,
                              hasretval=True)
-    getfiledata = []
+    getfiledata = {}
     for final, res in prog:
         if final:
             getfiledata = res
@@ -1803,7 +1803,8 @@ def applyupdates(repo, actions, wctx, mc
             actions[k].extend(acts)
             if k == ACTION_GET and wantfiledata:
                 # no filedata until mergestate is updated to provide it
-                getfiledata.extend([None] * len(acts))
+                for a in acts:
+                    getfiledata[a[0]] = None
             # Remove these files from actions[ACTION_MERGE] as well. This is
             # important because in recordupdates, files in actions[ACTION_MERGE]
             # are processed after files in other actions, and the merge driver
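
Each entry of acts here is an action tuple whose first element is the
filename, so this loop prefills dict slots by name rather than appending
positional placeholders. A toy illustration (tuple layout assumed from the
recordupdates hunk below):

    # Each action is (f, args, msg); a[0] is the filename.
    acts = [('f1', (), 'remote created'), ('f2', (), 'remote created')]
    getfiledata = {}
    for a in acts:
        getfiledata[a[0]] = None  # stat data arrives later, keyed by name
    print(getfiledata)            # {'f1': None, 'f2': None}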
@@ -1873,11 +1874,11 @@ def recordupdates(repo, actions, branchm
             pass
 
     # get
-    for i, (f, args, msg) in enumerate(actions.get(ACTION_GET, [])):
+    for f, args, msg in actions.get(ACTION_GET, []):
         if branchmerge:
             repo.dirstate.otherparent(f)
         else:
-            parentfiledata = getfiledata[i] if getfiledata else None
+            parentfiledata = getfiledata[f] if getfiledata else None
             repo.dirstate.normal(f, parentfiledata=parentfiledata)
 
     # merge
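
The consumer changes to match: recordupdates no longer assumes that
enumeration order lines up with getfiledata. A hedged sketch of the
lookup-by-name pattern, with the dirstate call stubbed out:

    # Assumes actions is a list of (f, args, msg) tuples and getfiledata
    # maps filename -> (mode, size, mtime) or None, as in the diff above.
    def record_gets(actions, getfiledata):
        recorded = {}
        for f, args, msg in actions:
            # Lookup by name is correct regardless of worker completion order.
            parentfiledata = getfiledata[f] if getfiledata else None
            recorded[f] = parentfiledata  # stands in for repo.dirstate.normal
        return recorded

    acts = [('a', (), 'g'), ('b', (), 'g')]
    print(record_gets(acts, {'b': (0o644, 0, 1), 'a': (0o644, 5, 2)}))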
@@ -100,8 +100,9 @@ def worker(ui, costperarg, func, statica
     workers
 
     hasretval - when True, func and the current function return an progress
-    iterator then a list (encoded as an iterator that yield many (False, ..)
-    then a (True, list)). The resulting list is in the natural order.
+    iterator then a dict (encoded as an iterator that yield many (False, ..)
+    then a (True, dict)). The dicts are joined in some arbitrary order, so
+    overlapping keys are a bad idea.
 
     threadsafe - whether work items are thread safe and can be executed using
     a thread-based worker. Should be disabled for CPU heavy tasks that don't
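
For callers the hasretval contract keeps its shape; only the payload type
moves from list to dict. A minimal consumption sketch, mirroring the
applyupdates hunk above:

    # Drain a hasretval-style iterator: progress tuples stream out as
    # (False, data); the single (True, dict) carries the merged return value.
    def consume(prog):
        retval = {}
        for final, res in prog:
            if final:
                retval = res  # merged dict from all workers
        return retval

    def fake_worker():
        yield False, (1, 'a')
        yield False, (2, 'b')
        yield True, {'a': 1, 'b': 2}

    print(consume(fake_worker()))  # {'a': 1, 'b': 2}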
@@ -162,8 +163,8 @@ def _posixworker(ui, func, staticargs, a
     ui.flush()
     parentpid = os.getpid()
     pipes = []
-    retvals = []
-    for i, pargs in enumerate(partition(args, workers)):
+    retval = {}
+    for pargs in partition(args, workers):
         # Every worker gets its own pipe to send results on, so we don't have to
         # implement atomic writes larger than PIPE_BUF. Each forked process has
         # its own pipe's descriptors in the local variables, and the parent
@@ -171,7 +172,6 @@ def _posixworker(ui, func, staticargs, a
         # care what order they're in).
         rfd, wfd = os.pipe()
         pipes.append((rfd, wfd))
-        retvals.append(None)
        # make sure we use os._exit in all worker code paths. otherwise the
        # worker may do some clean-ups which could cause surprises like
        # deadlock. see sshpeer.cleanup for example.
@@ -192,7 +192,7 @@ def _posixworker(ui, func, staticargs, a
                 os.close(w)
             os.close(rfd)
             for result in func(*(staticargs + (pargs,))):
-                os.write(wfd, util.pickle.dumps((i, result)))
+                os.write(wfd, util.pickle.dumps(result))
             return 0
 
         ret = scmutil.callcatch(ui, workerfunc)
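
Because every payload is now self-describing, either (False, progress) or
(True, dict), the parent no longer needs to know which child a message came
from, and the (i, result) wrapper can go. A standalone sketch of the
pickle-over-pipe framing, using plain pickle and os rather than Mercurial's
util wrappers (POSIX-only, since it forks):

    import os
    import pickle

    rfd, wfd = os.pipe()
    pid = os.fork()
    if pid == 0:
        # Child: stream results, then the final dict, one pickle frame each.
        os.close(rfd)
        with os.fdopen(wfd, 'wb') as w:
            pickle.dump((False, 'progress'), w)
            pickle.dump((True, {'file': (0o644, 0, 0)}), w)
        os._exit(0)
    os.close(wfd)
    retval = {}
    with os.fdopen(rfd, 'rb') as r:
        while True:
            try:
                res = pickle.load(r)  # one frame per dump(); no index needed
            except EOFError:
                break
            if res[0]:
                retval.update(res[1])
    os.waitpid(pid, 0)
    print(retval)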
@@ -226,9 +226,9 @@ def _posixworker(ui, func, staticargs, a
     while openpipes > 0:
         for key, events in selector.select():
             try:
-                i, res = util.pickle.load(key.fileobj)
+                res = util.pickle.load(key.fileobj)
                 if hasretval and res[0]:
-                    retvals[i] = res[1]
+                    retval.update(res[1])
                 else:
                     yield res
             except EOFError:
@@ -249,7 +249,7 @@ def _posixworker(ui, func, staticargs, a
             os.kill(os.getpid(), -status)
         sys.exit(status)
     if hasretval:
-        yield True, sum(retvals, [])
+        yield True, retval
 
 def _posixexitstatus(code):
     '''convert a posix exit status into the same form returned by
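
Final assembly changes from order-sensitive to order-insensitive: before,
sum(retvals, []) concatenated per-worker lists, and only a concatenation
matching the original action order gave correct results; now dict.update
merges whatever arrives, which is why the docstring warns that overlapping
keys are a bad idea. A tiny comparison:

    # Two workers finish in the "wrong" order.
    worker_a = {'f1': 'stat1', 'f2': 'stat2'}
    worker_b = {'f3': 'stat3'}

    retval = {}
    for part in (worker_b, worker_a):  # arrival order does not matter
        retval.update(part)
    assert retval['f3'] == 'stat3'
    # A list-based scheme only worked if concatenation order matched the
    # original action order, exactly what broke with multiple workers.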
@@ -281,9 +281,9 @@ def _windowsworker(ui, func, staticargs,
             try:
                 while not self._taskqueue.empty():
                     try:
-                        i, args = self._taskqueue.get_nowait()
+                        args = self._taskqueue.get_nowait()
                         for res in self._func(*self._staticargs + (args,)):
-                            self._resultqueue.put((i, res))
+                            self._resultqueue.put(res)
                         # threading doesn't provide a native way to
                         # interrupt execution. handle it manually at every
                         # iteration.
@@ -318,11 +318,10 @@ def _windowsworker(ui, func, staticargs,
     workers = _numworkers(ui)
     resultqueue = pycompat.queue.Queue()
     taskqueue = pycompat.queue.Queue()
-    retvals = []
+    retval = {}
     # partition work to more pieces than workers to minimize the chance
     # of uneven distribution of large tasks between the workers
-    for pargs in enumerate(partition(args, workers * 20)):
-        retvals.append(None)
+    for pargs in partition(args, workers * 20):
         taskqueue.put(pargs)
     for _i in range(workers):
         t = Worker(taskqueue, resultqueue, func, staticargs)
@@ -331,9 +330,9 @@ def _windowsworker(ui, func, staticargs,
     try:
         while len(threads) > 0:
             while not resultqueue.empty():
-                (i, res) = resultqueue.get()
+                res = resultqueue.get()
                 if hasretval and res[0]:
-                    retvals[i] = res[1]
+                    retval.update(res[1])
                 else:
                     yield res
             threads[0].join(0.05)
@@ -346,13 +345,13 @@ def _windowsworker(ui, func, staticargs,
             trykillworkers()
         raise
     while not resultqueue.empty():
-        (i, res) = resultqueue.get()
+        res = resultqueue.get()
         if hasretval and res[0]:
-            retvals[i] = res[1]
+            retval.update(res[1])
         else:
             yield res
     if hasretval:
-        yield True, sum(retvals, [])
+        yield True, retval
 
 if pycompat.iswindows:
     _platformworker = _windowsworker
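
The thread-based Windows worker gets the same treatment: tasks and results
travel through the queues untagged, and the parent merges (True, dict)
payloads as it drains them. A condensed sketch of that queue pattern using
plain threading rather than Mercurial's Worker class:

    import queue
    import threading

    taskqueue, resultqueue = queue.Queue(), queue.Queue()
    for chunk in (['a', 'b'], ['c']):
        taskqueue.put(chunk)  # no index attached to the task

    def run():
        while True:
            try:
                chunk = taskqueue.get_nowait()
            except queue.Empty:
                return
            resultqueue.put((True, {f: len(f) for f in chunk}))

    threads = [threading.Thread(target=run) for _ in range(2)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()

    retval = {}
    while not resultqueue.empty():
        final, res = resultqueue.get()
        if final:
            retval.update(res)  # merge in whatever order threads finished
    print(sorted(retval))       # ['a', 'b', 'c']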
@@ -110,24 +110,6 @@ update with worker processes
   getting 100
   100 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ hg status
-  M 100
-  M 11
-  M 2
-  M 21
-  M 3
-  M 4
-  M 41
-  M 5
-  M 51
-  M 54
-  M 6
-  M 61
-  M 7
-  M 71
-  M 8
-  M 81
-  M 9
-  M 91
 
   $ cd ..
 