Show More
@@ -1472,10 +1472,10 b' def batchget(repo, mctx, wctx, wantfiled' | |||
|
1472 | 1472 | |
|
1473 | 1473 | Yields arbitrarily many (False, tuple) for progress updates, followed by |
|
1474 | 1474 | exactly one (True, filedata). When wantfiledata is false, filedata is an |
|
1475 |
empty |
|
|
1476 |
mtime) of the file written for action |
|
|
1475 | empty dict. When wantfiledata is true, filedata[f] is a triple (mode, size, | |
|
1476 | mtime) of the file f written for each action. | |
|
1477 | 1477 | """ |
|
1478 |
filedata = |
|
|
1478 | filedata = {} | |
|
1479 | 1479 | verbose = repo.ui.verbose |
|
1480 | 1480 | fctx = mctx.filectx |
|
1481 | 1481 | ui = repo.ui |
@@ -1509,7 +1509,7 b' def batchget(repo, mctx, wctx, wantfiled' | |||
|
1509 | 1509 | s = wfctx.lstat() |
|
1510 | 1510 | mode = s.st_mode |
|
1511 | 1511 | mtime = s[stat.ST_MTIME] |
|
1512 |
filedata |
|
|
1512 | filedata[f] = ((mode, size, mtime)) # for dirstate.normal | |
|
1513 | 1513 | if i == 100: |
|
1514 | 1514 | yield False, (i, f) |
|
1515 | 1515 | i = 0 |
@@ -1670,7 +1670,7 b' def applyupdates(repo, actions, wctx, mc' | |||
|
1670 | 1670 | actions[ACTION_GET], |
|
1671 | 1671 | threadsafe=threadsafe, |
|
1672 | 1672 | hasretval=True) |
|
1673 |
getfiledata = |
|
|
1673 | getfiledata = {} | |
|
1674 | 1674 | for final, res in prog: |
|
1675 | 1675 | if final: |
|
1676 | 1676 | getfiledata = res |
@@ -1803,7 +1803,8 b' def applyupdates(repo, actions, wctx, mc' | |||
|
1803 | 1803 | actions[k].extend(acts) |
|
1804 | 1804 | if k == ACTION_GET and wantfiledata: |
|
1805 | 1805 | # no filedata until mergestate is updated to provide it |
|
1806 | getfiledata.extend([None] * len(acts)) | |
|
1806 | for a in acts: | |
|
1807 | getfiledata[a[0]] = None | |
|
1807 | 1808 | # Remove these files from actions[ACTION_MERGE] as well. This is |
|
1808 | 1809 | # important because in recordupdates, files in actions[ACTION_MERGE] |
|
1809 | 1810 | # are processed after files in other actions, and the merge driver |
@@ -1873,11 +1874,11 b' def recordupdates(repo, actions, branchm' | |||
|
1873 | 1874 | pass |
|
1874 | 1875 | |
|
1875 | 1876 | # get |
|
1876 |
for |
|
|
1877 | for f, args, msg in actions.get(ACTION_GET, []): | |
|
1877 | 1878 | if branchmerge: |
|
1878 | 1879 | repo.dirstate.otherparent(f) |
|
1879 | 1880 | else: |
|
1880 |
parentfiledata = getfiledata[ |
|
|
1881 | parentfiledata = getfiledata[f] if getfiledata else None | |
|
1881 | 1882 | repo.dirstate.normal(f, parentfiledata=parentfiledata) |
|
1882 | 1883 | |
|
1883 | 1884 | # merge |
@@ -100,8 +100,9 b' def worker(ui, costperarg, func, statica' | |||
|
100 | 100 | workers |
|
101 | 101 | |
|
102 | 102 | hasretval - when True, func and the current function return a progress |
|
103 |
iterator then a |
|
|
104 |
then a (True, |
|
|
103 | iterator then a dict (encoded as an iterator that yields many (False, ..) |
|
104 | then a (True, dict)). The dicts are joined in some arbitrary order, so | |
|
105 | overlapping keys are a bad idea. | |
|
105 | 106 | |
|
106 | 107 | threadsafe - whether work items are thread safe and can be executed using |
|
107 | 108 | a thread-based worker. Should be disabled for CPU heavy tasks that don't |
@@ -162,8 +163,8 b' def _posixworker(ui, func, staticargs, a' | |||
|
162 | 163 | ui.flush() |
|
163 | 164 | parentpid = os.getpid() |
|
164 | 165 | pipes = [] |
|
165 |
retval |
|
|
166 |
for |
|
|
166 | retval = {} | |
|
167 | for pargs in partition(args, workers): | |
|
167 | 168 | # Every worker gets its own pipe to send results on, so we don't have to |
|
168 | 169 | # implement atomic writes larger than PIPE_BUF. Each forked process has |
|
169 | 170 | # its own pipe's descriptors in the local variables, and the parent |
@@ -171,7 +172,6 b' def _posixworker(ui, func, staticargs, a' | |||
|
171 | 172 | # care what order they're in). |
|
172 | 173 | rfd, wfd = os.pipe() |
|
173 | 174 | pipes.append((rfd, wfd)) |
|
174 | retvals.append(None) | |
|
175 | 175 | # make sure we use os._exit in all worker code paths. otherwise the |
|
176 | 176 | # worker may do some clean-ups which could cause surprises like |
|
177 | 177 | # deadlock. see sshpeer.cleanup for example. |
@@ -192,7 +192,7 b' def _posixworker(ui, func, staticargs, a' | |||
|
192 | 192 | os.close(w) |
|
193 | 193 | os.close(rfd) |
|
194 | 194 | for result in func(*(staticargs + (pargs,))): |
|
195 |
os.write(wfd, util.pickle.dumps( |
|
|
195 | os.write(wfd, util.pickle.dumps(result)) | |
|
196 | 196 | return 0 |
|
197 | 197 | |
|
198 | 198 | ret = scmutil.callcatch(ui, workerfunc) |
@@ -226,9 +226,9 b' def _posixworker(ui, func, staticargs, a' | |||
|
226 | 226 | while openpipes > 0: |
|
227 | 227 | for key, events in selector.select(): |
|
228 | 228 | try: |
|
229 |
|
|
|
229 | res = util.pickle.load(key.fileobj) | |
|
230 | 230 | if hasretval and res[0]: |
|
231 |
retval |
|
|
231 | retval.update(res[1]) | |
|
232 | 232 | else: |
|
233 | 233 | yield res |
|
234 | 234 | except EOFError: |
@@ -249,7 +249,7 b' def _posixworker(ui, func, staticargs, a' | |||
|
249 | 249 | os.kill(os.getpid(), -status) |
|
250 | 250 | sys.exit(status) |
|
251 | 251 | if hasretval: |
|
252 |
yield True, |
|
|
252 | yield True, retval | |
|
253 | 253 | |
|
254 | 254 | def _posixexitstatus(code): |
|
255 | 255 | '''convert a posix exit status into the same form returned by |
@@ -281,9 +281,9 b' def _windowsworker(ui, func, staticargs,' | |||
|
281 | 281 | try: |
|
282 | 282 | while not self._taskqueue.empty(): |
|
283 | 283 | try: |
|
284 |
|
|
|
284 | args = self._taskqueue.get_nowait() | |
|
285 | 285 | for res in self._func(*self._staticargs + (args,)): |
|
286 |
self._resultqueue.put( |
|
|
286 | self._resultqueue.put(res) | |
|
287 | 287 | # threading doesn't provide a native way to |
|
288 | 288 | # interrupt execution. handle it manually at every |
|
289 | 289 | # iteration. |
@@ -318,11 +318,10 b' def _windowsworker(ui, func, staticargs,' | |||
|
318 | 318 | workers = _numworkers(ui) |
|
319 | 319 | resultqueue = pycompat.queue.Queue() |
|
320 | 320 | taskqueue = pycompat.queue.Queue() |
|
321 |
retval |
|
|
321 | retval = {} | |
|
322 | 322 | # partition work to more pieces than workers to minimize the chance |
|
323 | 323 | # of uneven distribution of large tasks between the workers |
|
324 |
for pargs in |
|
|
325 | retvals.append(None) | |
|
324 | for pargs in partition(args, workers * 20): | |
|
326 | 325 | taskqueue.put(pargs) |
|
327 | 326 | for _i in range(workers): |
|
328 | 327 | t = Worker(taskqueue, resultqueue, func, staticargs) |
@@ -331,9 +330,9 b' def _windowsworker(ui, func, staticargs,' | |||
|
331 | 330 | try: |
|
332 | 331 | while len(threads) > 0: |
|
333 | 332 | while not resultqueue.empty(): |
|
334 |
|
|
|
333 | res = resultqueue.get() | |
|
335 | 334 | if hasretval and res[0]: |
|
336 |
retval |
|
|
335 | retval.update(res[1]) | |
|
337 | 336 | else: |
|
338 | 337 | yield res |
|
339 | 338 | threads[0].join(0.05) |
@@ -346,13 +345,13 b' def _windowsworker(ui, func, staticargs,' | |||
|
346 | 345 | trykillworkers() |
|
347 | 346 | raise |
|
348 | 347 | while not resultqueue.empty(): |
|
349 |
|
|
|
348 | res = resultqueue.get() | |
|
350 | 349 | if hasretval and res[0]: |
|
351 |
retval |
|
|
350 | retval.update(res[1]) | |
|
352 | 351 | else: |
|
353 | 352 | yield res |
|
354 | 353 | if hasretval: |
|
355 |
yield True, |
|
|
354 | yield True, retval | |
|
356 | 355 | |
|
357 | 356 | if pycompat.iswindows: |
|
358 | 357 | _platformworker = _windowsworker |
@@ -110,24 +110,6 b' update with worker processes' | |||
|
110 | 110 | getting 100 |
|
111 | 111 | 100 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
112 | 112 | $ hg status |
|
113 | M 100 | |
|
114 | M 11 | |
|
115 | M 2 | |
|
116 | M 21 | |
|
117 | M 3 | |
|
118 | M 4 | |
|
119 | M 41 | |
|
120 | M 5 | |
|
121 | M 51 | |
|
122 | M 54 | |
|
123 | M 6 | |
|
124 | M 61 | |
|
125 | M 7 | |
|
126 | M 71 | |
|
127 | M 8 | |
|
128 | M 81 | |
|
129 | M 9 | |
|
130 | M 91 | |
|
131 | 113 | |
|
132 | 114 | $ cd .. |
|
133 | 115 |
General Comments 0
You need to be logged in to leave comments.
Login now