##// END OF EJS Templates
copies: use dedicated `_revinfo_getter` function and call...
marmoute -
r46215:4f876e6b default
parent child Browse files
Show More
@@ -201,73 +201,63 b' def _revinfo_getter(repo):'
201 201
202 202 return ismerged
203 203
204 if repo.filecopiesmode == b'changeset-sidedata':
205 changelogrevision = cl.changelogrevision
206 flags = cl.flags
204 changelogrevision = cl.changelogrevision
205 flags = cl.flags
207 206
208 # A small cache to avoid doing the work twice for merges
209 #
210 # In the vast majority of cases, if we ask information for a revision
211 about 1 parent, we'll later ask it for the other. So it makes sense to
212 # keep the information around when reaching the first parent of a merge
213 # and dropping it after it was provided for the second parents.
214 #
215 # There exist cases where only one parent of the merge will be walked. It
216 # happens when the "destination" of the copy tracing is a descendant of a
217 # new root, not common with the "source". In that case, we will only walk
218 # through merge parents that are descendant of changesets common
219 # between "source" and "destination".
220 #
221 # With the current cache implementation if such changesets have a copy
222 # information, we'll keep them in memory until the end of
223 # _changesetforwardcopies. We don't expect the case to be frequent
224 # enough to matter.
225 #
226 # In addition, it would be possible to reach a pathological case, where
227 # many first parents are met before any second parent is reached. In
228 # that case the cache could grow. If this even become an issue one can
229 # safely introduce a maximum cache size. This would trade extra CPU/IO
230 # time to save memory.
231 merge_caches = {}
207 # A small cache to avoid doing the work twice for merges
208 #
209 # In the vast majority of cases, if we ask information for a revision
210 about 1 parent, we'll later ask it for the other. So it makes sense to
211 # keep the information around when reaching the first parent of a merge
212 # and dropping it after it was provided for the second parents.
213 #
214 # There exist cases where only one parent of the merge will be walked. It
215 # happens when the "destination" of the copy tracing is a descendant of a
216 # new root, not common with the "source". In that case, we will only walk
217 # through merge parents that are descendant of changesets common
218 # between "source" and "destination".
219 #
220 # With the current cache implementation if such changesets have a copy
221 # information, we'll keep them in memory until the end of
222 # _changesetforwardcopies. We don't expect the case to be frequent
223 # enough to matter.
224 #
225 # In addition, it would be possible to reach a pathological case, where
226 # many first parents are met before any second parent is reached. In
227 # that case the cache could grow. If this even become an issue one can
228 # safely introduce a maximum cache size. This would trade extra CPU/IO
229 # time to save memory.
230 merge_caches = {}
232 231
233 def revinfo(rev):
234 p1, p2 = parents(rev)
235 value = None
236 if flags(rev) & REVIDX_SIDEDATA:
237 e = merge_caches.pop(rev, None)
238 if e is not None:
239 return e
240 c = changelogrevision(rev)
241 p1copies = c.p1copies
242 p2copies = c.p2copies
243 removed = c.filesremoved
244 if p1 != node.nullrev and p2 != node.nullrev:
245 # XXX some case we over cache, IGNORE
246 value = merge_caches[rev] = (
247 p1,
248 p2,
249 p1copies,
250 p2copies,
251 removed,
252 get_ismerged(rev),
253 )
254 else:
255 p1copies = {}
256 p2copies = {}
257 removed = []
232 def revinfo(rev):
233 p1, p2 = parents(rev)
234 value = None
235 if flags(rev) & REVIDX_SIDEDATA:
236 e = merge_caches.pop(rev, None)
237 if e is not None:
238 return e
239 c = changelogrevision(rev)
240 p1copies = c.p1copies
241 p2copies = c.p2copies
242 removed = c.filesremoved
243 if p1 != node.nullrev and p2 != node.nullrev:
244 # XXX some case we over cache, IGNORE
245 value = merge_caches[rev] = (
246 p1,
247 p2,
248 p1copies,
249 p2copies,
250 removed,
251 get_ismerged(rev),
252 )
253 else:
254 p1copies = {}
255 p2copies = {}
256 removed = []
258 257
259 if value is None:
260 value = (p1, p2, p1copies, p2copies, removed, get_ismerged(rev))
261 return value
262
263 else:
264
265 def revinfo(rev):
266 p1, p2 = parents(rev)
267 ctx = repo[rev]
268 p1copies, p2copies = ctx._copies
269 removed = ctx.filesremoved()
270 return p1, p2, p1copies, p2copies, removed, get_ismerged(rev)
258 if value is None:
259 value = (p1, p2, p1copies, p2copies, removed, get_ismerged(rev))
260 return value
271 261
272 262 return revinfo
273 263
@@ -278,7 +268,6 b' def _changesetforwardcopies(a, b, match)'
278 268
279 269 repo = a.repo().unfiltered()
280 270 children = {}
281 revinfo = _revinfo_getter(repo)
282 271
283 272 cl = repo.changelog
284 273 isancestor = cl.isancestorrev # XXX we should had chaching to this.
@@ -311,10 +300,12 b' def _changesetforwardcopies(a, b, match)'
311 300 revs = sorted(iterrevs)
312 301
313 302 if repo.filecopiesmode == b'changeset-sidedata':
303 revinfo = _revinfo_getter(repo)
314 304 return _combine_changeset_copies(
315 305 revs, children, b.rev(), revinfo, match, isancestor
316 306 )
317 307 else:
308 revinfo = _revinfo_getter_extra(repo)
318 309 return _combine_changeset_copies_extra(
319 310 revs, children, b.rev(), revinfo, match, isancestor
320 311 )
@@ -428,6 +419,45 b' def _merge_copies_dict(minor, major, isa'
428 419 minor[dest] = value
429 420
430 421
422 def _revinfo_getter_extra(repo):
423 """return a function that returns multiple data given a <rev>
424
425 * p1: revision number of first parent
426 * p2: revision number of second parent
427 * p1copies: mapping of copies from p1
428 * p2copies: mapping of copies from p2
429 * removed: a list of removed files
430 * ismerged: a callback to know if file was merged in that revision
431 """
432 cl = repo.changelog
433 parents = cl.parentrevs
434
435 def get_ismerged(rev):
436 ctx = repo[rev]
437
438 def ismerged(path):
439 if path not in ctx.files():
440 return False
441 fctx = ctx[path]
442 parents = fctx._filelog.parents(fctx._filenode)
443 nb_parents = 0
444 for n in parents:
445 if n != node.nullid:
446 nb_parents += 1
447 return nb_parents >= 2
448
449 return ismerged
450
451 def revinfo(rev):
452 p1, p2 = parents(rev)
453 ctx = repo[rev]
454 p1copies, p2copies = ctx._copies
455 removed = ctx.filesremoved()
456 return p1, p2, p1copies, p2copies, removed, get_ismerged(rev)
457
458 return revinfo
459
460
431 461 def _combine_changeset_copies_extra(
432 462 revs, children, targetrev, revinfo, match, isancestor
433 463 ):
General Comments 0
You need to be logged in to leave comments. Login now