@@ -42,10 +42,59 @@ def read(repo):
     partial = branchcache()
     return partial

-def update(repo, partial, ctxgen):
-    """Given a branchhead cache, partial, that may have extra nodes or be
+
+
+def updatecache(repo):
+    repo = repo.unfiltered()  # Until we get a smarter cache management
+    cl = repo.changelog
+    tip = cl.tip()
+    partial = repo._branchcache
+    if partial is not None and partial.tipnode == tip:
+        return
+
+    if partial is None or partial.tipnode not in cl.nodemap:
+        partial = read(repo)
+
+    catip = repo._cacheabletip()
+    # if partial.tiprev == catip: cache is already up to date
+    # if partial.tiprev >  catip: we have uncachable element in `partial` can't
+    #                             write on disk
+    if partial.tiprev < catip:
+        ctxgen = (repo[r] for r in cl.revs(partial.tiprev + 1, catip))
+        partial.update(repo, ctxgen)
+        partial.write(repo)
+    # If cacheable tip were lower than actual tip, we need to update the
+    # cache up to tip. This update (from cacheable to actual tip) is not
+    # written to disk since it's not cacheable.
+    tiprev = len(repo) - 1
+    if partial.tiprev < tiprev:
+        ctxgen = (repo[r] for r in cl.revs(partial.tiprev + 1, tiprev))
+        partial.update(repo, ctxgen)
+    repo._branchcache = partial
+
+class branchcache(dict):
+    """A dict like object that hold branches heads cache"""
+
+    def __init__(self, entries=(), tipnode=nullid, tiprev=nullrev):
+        super(branchcache, self).__init__(entries)
+        self.tipnode = tipnode
+        self.tiprev = tiprev
+
+    def write(self, repo):
+        try:
+            f = repo.opener("cache/branchheads", "w", atomictemp=True)
+            f.write("%s %s\n" % (hex(self.tipnode), self.tiprev))
+            for label, nodes in self.iteritems():
+                for node in nodes:
+                    f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
+            f.close()
+        except (IOError, OSError):
+            pass
+
+    def update(self, repo, ctxgen):
+        """Given a branchhead cache, self, that may have extra nodes or be
     missing heads, and a generator of nodes that are at least a superset of
-    heads missing, this function updates partial to be correct.
+        heads missing, this function updates self to be correct.
     """
     cl = repo.changelog
     # collect new branch entries
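As a reading aid for the hunk above: the relocated branchcache class is just a dict keyed by branch name whose values are lists of branch head nodes, carrying a (tipnode, tiprev) pair that acts as the cache validity key. Below is a minimal standalone sketch of that shape (a simplified stand-in of my own, not Mercurial's class; the node strings are made-up placeholders), including the one-line-per-head text layout that write() above produces:

# Simplified stand-in for the branchcache class shown above; not Mercurial code.
# The 40-character node strings are fabricated placeholders.
class simplebranchcache(dict):
    def __init__(self, entries=(), tipnode='0' * 40, tiprev=-1):
        super(simplebranchcache, self).__init__(entries)
        self.tipnode = tipnode   # hash of the tip the cache was computed against
        self.tiprev = tiprev     # revision number of that tip

    def serialize(self):
        # Same text layout as branchcache.write(): a "<tiphex> <tiprev>" header
        # line, then one "<headhex> <branch>" line per branch head.
        lines = ['%s %s\n' % (self.tipnode, self.tiprev)]
        for label, nodes in self.iteritems():
            for node in nodes:
                lines.append('%s %s\n' % (node, label))
        return ''.join(lines)

cache = simplebranchcache({'default': ['ab' * 20], 'stable': ['cd' * 20]},
                          tipnode='cd' * 20, tiprev=7)
print cache.serialize()

Keeping the heads in a plain dict is what lets the callers changed further down iterate branches with ordinary dict methods such as iteritems() and keys().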
@@ -56,7 +105,7 @@ def update(repo, partial, ctxgen):
     # really branchheads. Note checking parents is insufficient:
     # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
     for branch, newnodes in newbranches.iteritems():
-        bheads = partial.setdefault(branch, [])
+        bheads = self.setdefault(branch, [])
         # Remove candidate heads that no longer are in the repo (e.g., as
         # the result of a strip that just happened). Avoid using 'node in
         # self' here because that dives down into branchcache code somewhat
@@ -90,12 +139,11 @@ def update(repo, partial, ctxgen):
                                          bheadrevs[0]))
             if ancestors:
                 bheadrevs = [b for b in bheadrevs if b not in ancestors]
-        partial[branch] = [cl.node(rev) for rev in bheadrevs]
+        self[branch] = [cl.node(rev) for rev in bheadrevs]
         tiprev = max(bheadrevs)
-        if tiprev > partial.tiprev:
+        if tiprev > self.tiprev:
-            partial.tipnode = cl.node(tiprev)
+            self.tipnode = cl.node(tiprev)
-            partial.tiprev = tiprev
+            self.tiprev = tiprev
-

     # There may be branches that cease to exist when the last commit in the
     # branch was stripped. This code filters them out. Note that the
@@ -103,71 +151,23 @@ def update(repo, partial, ctxgen):
     # newbranches is the set of candidate heads, which when you strip the
     # last commit in a branch will be the parent branch.
     droppednodes = []
-    for branch in partial.keys():
+    for branch in self.keys():
-        nodes = [head for head in partial[branch]
+        nodes = [head for head in self[branch]
                  if cl.hasnode(head)]
         if not nodes:
             droppednodes.extend(nodes)
-            del partial[branch]
+            del self[branch]
     try:
-        node = cl.node(partial.tiprev)
+        node = cl.node(self.tiprev)
     except IndexError:
         node = None
-    if ((partial.tipnode != node)
+    if ((self.tipnode != node)
-        or (partial.tipnode in droppednodes)):
+        or (self.tipnode in droppednodes)):
         # cache key are not valid anymore
-        partial.tipnode = nullid
+        self.tipnode = nullid
-        partial.tiprev = nullrev
+        self.tiprev = nullrev
-        for heads in partial.values():
+        for heads in self.values():
             tiprev = max(cl.rev(node) for node in heads)
-            if tiprev > partial.tiprev:
+            if tiprev > self.tiprev:
-                partial.tipnode = cl.node(tiprev)
+                self.tipnode = cl.node(tiprev)
-                partial.tiprev = tiprev
-
-
-def updatecache(repo):
-    repo = repo.unfiltered()  # Until we get a smarter cache management
-    cl = repo.changelog
-    tip = cl.tip()
-    partial = repo._branchcache
-    if partial is not None and partial.tipnode == tip:
-        return
-
-    if partial is None or partial.tipnode not in cl.nodemap:
-        partial = read(repo)
-
-    catip = repo._cacheabletip()
-    # if partial.tiprev == catip: cache is already up to date
-    # if partial.tiprev >  catip: we have uncachable element in `partial` can't
-    #                             write on disk
-    if partial.tiprev < catip:
-        ctxgen = (repo[r] for r in cl.revs(partial.tiprev + 1, catip))
-        update(repo, partial, ctxgen)
-        partial.write(repo)
-    # If cacheable tip were lower than actual tip, we need to update the
-    # cache up to tip. This update (from cacheable to actual tip) is not
-    # written to disk since it's not cacheable.
-    tiprev = len(repo) - 1
-    if partial.tiprev < tiprev:
-        ctxgen = (repo[r] for r in cl.revs(partial.tiprev + 1, tiprev))
-        update(repo, partial, ctxgen)
-    repo._branchcache = partial
-
-class branchcache(dict):
-    """A dict like object that hold branches heads cache"""
-
-    def __init__(self, entries=(), tipnode=nullid, tiprev=nullrev):
-        super(branchcache, self).__init__(entries)
-        self.tipnode = tipnode
         self.tiprev = tiprev
-
-    def write(self, repo):
-        try:
-            f = repo.opener("cache/branchheads", "w", atomictemp=True)
-            f.write("%s %s\n" % (hex(self.tipnode), self.tiprev))
-            for label, nodes in self.iteritems():
-                for node in nodes:
-                    f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
-            f.close()
-        except (IOError, OSError):
-            pass
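Before the caller hunks that follow, it may help to isolate the control flow of updatecache() shown above: revisions up to the repository's cacheable tip are folded into the cache and written out, while anything past that point is applied in memory only. A rough standalone sketch of that two-phase idea (refresh_cache, apply_revs and persist are hypothetical stand-ins, not Mercurial APIs):

# Hypothetical stand-ins (not Mercurial APIs): apply_revs(cache, first, last)
# plays the role of branchcache.update() and is assumed to advance
# cache.tiprev; persist(cache) plays the role of branchcache.write().
def refresh_cache(cache, cacheable_tip, actual_tip, apply_revs, persist):
    # Phase 1: fold in everything up to the last revision that is safe to
    # store on disk, then write the cache out.
    if cache.tiprev < cacheable_tip:
        apply_revs(cache, cache.tiprev + 1, cacheable_tip)
        persist(cache)
    # Phase 2: cover the remaining, uncacheable revisions in memory only;
    # nothing beyond cacheable_tip is ever written to disk.
    if cache.tiprev < actual_tip:
        apply_revs(cache, cache.tiprev + 1, actual_tip)
    return cache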
@@ -196,7 +196,7 @@ def _headssummary(repo, remote, outgoing
     newmap = branchmap.branchcache((branch, heads[1])
                                    for branch, heads in headssum.iteritems()
                                    if heads[0] is not None)
-    branchmap.update(repo, newmap, missingctx)
+    newmap.update(repo, missingctx)
     for branch, newheads in newmap.iteritems():
         headssum[branch][1][:] = newheads
     return headssum
@@ -666,7 +666,7 @@ class localrepository(object):
         if self.changelog.filteredrevs:
             # some changeset are excluded we can't use the cache
             bmap = branchmap.branchcache()
-            branchmap.update(self, bmap, (self[r] for r in self))
+            bmap.update(self, (self[r] for r in self))
             return bmap
         else:
             branchmap.updatecache(self)
@@ -1437,7 +1437,7 @@ class localrepository(object):
             ctxgen = (self[node] for node in newheadnodes
                       if self.changelog.hasnode(node))
             cache = self._branchcache
-            branchmap.update(self, cache, ctxgen)
+            cache.update(self, ctxgen)
             cache.write(self)

         # Ensure the persistent tag cache is updated. Doing it now
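Taken together, the caller hunks above all switch to the same pattern: obtain a branchcache, feed its update() method a generator of changectx objects, and optionally write() the result. A rough usage sketch of that pattern against a local repository (assuming Python 2 and the Mercurial release this change is built on; '/path/to/repo' is a placeholder):

# Sketch only: build a fresh branch heads cache for an existing repository.
from mercurial import hg, ui, branchmap

repo = hg.repository(ui.ui(), '/path/to/repo')   # placeholder path

# Same shape as the localrepository.branchmap() fallback above: an empty
# cache brought up to date with a changectx generator over every revision.
cache = branchmap.branchcache()
cache.update(repo, (repo[r] for r in repo))
cache.write(repo)   # stored under .hg/cache/branchheads, as write() shows

for branch, heads in cache.iteritems():
    print branch, [repo[node].rev() for node in heads]

This mirrors the fallback branch of localrepository.branchmap() in the diff, which rebuilds the cache from every revision when filtered changesets make the stored cache unusable.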