@@ -196,67 +196,27 @@ def _revinfo_getter(repo, match):
 
     changelogrevision = cl.changelogrevision
 
-    # A small cache to avoid doing the work twice for merges
-    #
-    # In the vast majority of cases, if we ask information for a revision
-    # about 1 parent, we'll later ask it for the other. So it make sense to
-    # keep the information around when reaching the first parent of a merge
-    # and dropping it after it was provided for the second parents.
-    #
-    # It exists cases were only one parent of the merge will be walked. It
-    # happens when the "destination" the copy tracing is descendant from a
-    # new root, not common with the "source". In that case, we will only walk
-    # through merge parents that are descendant of changesets common
-    # between "source" and "destination".
-    #
-    # With the current case implementation if such changesets have a copy
-    # information, we'll keep them in memory until the end of
-    # _changesetforwardcopies. We don't expect the case to be frequent
-    # enough to matters.
-    #
-    # In addition, it would be possible to reach pathological case, were
-    # many first parent are met before any second parent is reached. In
-    # that case the cache could grow. If this even become an issue one can
-    # safely introduce a maximum cache size. This would trade extra CPU/IO
-    # time to save memory.
-    merge_caches = {}
-
     alwaysmatch = match.always()
 
     if rustmod is not None and alwaysmatch:
 
         def revinfo(rev):
             p1, p2 = parents(rev)
-            value = None
-            e = merge_caches.pop(rev, None)
-            if e is not None:
-                return e
             if flags(rev) & HASCOPIESINFO:
                 raw = changelogrevision(rev)._sidedata.get(sidedatamod.SD_FILES)
             else:
                 raw = None
-
-            if p1 != nullrev and p2 != nullrev:
-                # XXX some case we over cache, IGNORE
-                merge_caches[rev] = value
-            return value
+            return (p1, p2, raw)
 
     else:
 
         def revinfo(rev):
             p1, p2 = parents(rev)
-            value = None
-            e = merge_caches.pop(rev, None)
-            if e is not None:
-                return e
-            changes = None
             if flags(rev) & HASCOPIESINFO:
                 changes = changelogrevision(rev).changes
-                value = (p1, p2, changes)
-            if p1 != nullrev and p2 != nullrev:
-                # XXX some case we over cache, IGNORE
-                merge_caches[rev] = value
-            return value
+            else:
+                changes = None
+            return (p1, p2, changes)
 
     return revinfo
 
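For readers following the change, here is a minimal, self-contained sketch of the merge-parent caching pattern that the removed comment block describes: the value computed for a merge revision is kept after the first parent asks for it and popped when the second parent does. The names (make_cached_revinfo, revinfo, parents) are illustrative stand-ins, not Mercurial's actual API.

# Illustrative sketch only -- make_cached_revinfo, revinfo and parents are
# hypothetical names, not Mercurial's API.
def make_cached_revinfo(revinfo, parents, nullrev=-1):
    """Wrap ``revinfo`` so merge revisions are only computed once.

    Mirrors the intent of the removed ``merge_caches`` dict: cache the value
    the first time a merge revision is asked about (usually while handling
    its first parent) and pop it when the second request arrives, so the
    cache stays small.
    """
    merge_caches = {}

    def cached(rev):
        value = merge_caches.pop(rev, None)
        if value is not None:
            return value
        value = revinfo(rev)
        p1, p2 = parents(rev)
        if p1 != nullrev and p2 != nullrev:
            # Merge revision: keep the result around for the other parent's
            # request. As the removed "XXX" comment notes, this can over-cache
            # when only one parent of the merge ends up being walked.
            merge_caches[rev] = value
        return value

    return cached

The patch itself removes this bookkeeping from revinfo entirely, so each call simply returns a plain (p1, p2, ...) tuple.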