Show More
@@ -1,487 +1,485 b'' | |||||
1 | # copies.py - copy detection for Mercurial |
|
1 | # copies.py - copy detection for Mercurial | |
2 | # |
|
2 | # | |
3 | # Copyright 2008 Matt Mackall <mpm@selenic.com> |
|
3 | # Copyright 2008 Matt Mackall <mpm@selenic.com> | |
4 | # |
|
4 | # | |
5 | # This software may be used and distributed according to the terms of the |
|
5 | # This software may be used and distributed according to the terms of the | |
6 | # GNU General Public License version 2 or any later version. |
|
6 | # GNU General Public License version 2 or any later version. | |
7 |
|
7 | |||
8 | import util |
|
8 | import util | |
9 | import heapq |
|
9 | import heapq | |
10 |
|
10 | |||
11 | def _nonoverlap(d1, d2, d3): |
|
11 | def _nonoverlap(d1, d2, d3): | |
12 | "Return list of elements in d1 not in d2 or d3" |
|
12 | "Return list of elements in d1 not in d2 or d3" | |
13 | return sorted([d for d in d1 if d not in d3 and d not in d2]) |
|
13 | return sorted([d for d in d1 if d not in d3 and d not in d2]) | |
14 |
|
14 | |||
15 | def _dirname(f): |
|
15 | def _dirname(f): | |
16 | s = f.rfind("/") |
|
16 | s = f.rfind("/") | |
17 | if s == -1: |
|
17 | if s == -1: | |
18 | return "" |
|
18 | return "" | |
19 | return f[:s] |
|
19 | return f[:s] | |
20 |
|
20 | |||
def _findlimit(repo, a, b):
    """
    Find the last revision that needs to be checked to ensure that a full
    transitive closure for file copies can be properly calculated.
    Generally, this means finding the earliest revision number that's an
    ancestor of a or b but not both, except when a or b is a direct descendent
    of the other, in which case we can return the minimum revnum of a and b.
    None if no such revision exists.

    repo - the repository whose changelog (and dirstate, for the working
           directory pseudo rev) is walked
    a, b - revision numbers; None stands for the working directory
    """

    # basic idea:
    # - mark a and b with different sides
    # - if a parent's children are all on the same side, the parent is
    #   on that side, otherwise it is on no side
    # - walk the graph in topological order with the help of a heap;
    #   - add unseen parents to side map
    #   - clear side of any parent that has children on different sides
    #   - track number of interesting revs that might still be on a side
    #   - track the lowest interesting rev seen
    #   - quit when interesting revs is zero

    cl = repo.changelog
    working = len(cl) # pseudo rev for the working directory
    if a is None:
        a = working
    if b is None:
        b = working

    side = {a: -1, b: 1}
    # revs are negated so the min-heap pops the highest revision first,
    # i.e. the walk proceeds in reverse topological order
    visit = [-a, -b]
    heapq.heapify(visit)
    interesting = len(visit)
    hascommonancestor = False
    limit = working

    while interesting:
        r = -heapq.heappop(visit)
        if r == working:
            # the working directory's parents come from the dirstate
            parents = [cl.rev(p) for p in repo.dirstate.parents()]
        else:
            parents = cl.parentrevs(r)
        for p in parents:
            if p < 0:
                # nullrev parent; nothing to walk
                continue
            if p not in side:
                # first time we see p; add it to visit
                side[p] = side[r]
                if side[p]:
                    interesting += 1
                heapq.heappush(visit, -p)
            elif side[p] and side[p] != side[r]:
                # p was interesting but now we know better
                side[p] = 0
                interesting -= 1
                hascommonancestor = True
        if side[r]:
            limit = r # lowest rev visited
            interesting -= 1

    if not hascommonancestor:
        return None

    # Consider the following flow (see test-commit-amend.t under issue4405):
    # 1/ File 'a0' committed
    # 2/ File renamed from 'a0' to 'a1' in a new commit (call it 'a1')
    # 3/ Move back to first commit
    # 4/ Create a new commit via revert to contents of 'a1' (call it 'a1-amend')
    # 5/ Rename file from 'a1' to 'a2' and commit --amend 'a1-msg'
    #
    # During the amend in step five, we will be in this state:
    #
    # @ 3 temporary amend commit for a1-amend
    # |
    # o 2 a1-amend
    # |
    # | o 1 a1
    # |/
    # o 0 a0
    #
    # When _findlimit is called, a and b are revs 3 and 0, so limit will be 2,
    # yet the filelog has the copy information in rev 1 and we will not look
    # back far enough unless we also look at the a and b as candidates.
    # This only occurs when a is a descendent of b or visa-versa.
    return min(limit, a, b)
105 |
|
105 | |||
def _chain(src, dst, a, b):
    '''chain two sets of copies a->b'''
    # src and dst are the endpoint contexts, used here only for membership
    # tests; a and b are {dst: src} copy dicts from two consecutive
    # comparisons (see pathcopies), composed into a single {dst: src} dict.
    t = a.copy()
    for k, v in b.iteritems():
        if v in t:
            # found a chain
            if t[v] != k:
                # file wasn't renamed back to itself
                t[k] = t[v]
            if v not in dst:
                # chain was a rename, not a copy
                del t[v]
        if v in src:
            # file is a copy of an existing file
            t[k] = v

    # remove criss-crossed copies
    # note: t.items() returns a list copy under Python 2, so deleting from
    # t inside this loop is safe
    for k, v in t.items():
        if k in src and v in dst:
            del t[k]

    return t
128 |
|
128 | |||
129 | def _tracefile(fctx, am, limit=-1): |
|
129 | def _tracefile(fctx, am, limit=-1): | |
130 | '''return file context that is the ancestor of fctx present in ancestor |
|
130 | '''return file context that is the ancestor of fctx present in ancestor | |
131 | manifest am, stopping after the first ancestor lower than limit''' |
|
131 | manifest am, stopping after the first ancestor lower than limit''' | |
132 |
|
132 | |||
133 | for f in fctx.ancestors(): |
|
133 | for f in fctx.ancestors(): | |
134 | if am.get(f.path(), None) == f.filenode(): |
|
134 | if am.get(f.path(), None) == f.filenode(): | |
135 | return f |
|
135 | return f | |
136 | if limit >= 0 and f.linkrev() < limit and f.rev() < limit: |
|
136 | if limit >= 0 and f.linkrev() < limit and f.rev() < limit: | |
137 | return None |
|
137 | return None | |
138 |
|
138 | |||
139 | def _dirstatecopies(d): |
|
139 | def _dirstatecopies(d): | |
140 | ds = d._repo.dirstate |
|
140 | ds = d._repo.dirstate | |
141 | c = ds.copies().copy() |
|
141 | c = ds.copies().copy() | |
142 | for k in c.keys(): |
|
142 | for k in c.keys(): | |
143 | if ds[k] not in 'anm': |
|
143 | if ds[k] not in 'anm': | |
144 | del c[k] |
|
144 | del c[k] | |
145 | return c |
|
145 | return c | |
146 |
|
146 | |||
147 | def _computeforwardmissing(a, b): |
|
147 | def _computeforwardmissing(a, b): | |
148 | """Computes which files are in b but not a. |
|
148 | """Computes which files are in b but not a. | |
149 | This is its own function so extensions can easily wrap this call to see what |
|
149 | This is its own function so extensions can easily wrap this call to see what | |
150 | files _forwardcopies is about to process. |
|
150 | files _forwardcopies is about to process. | |
151 | """ |
|
151 | """ | |
152 | missing = set(b.manifest().iterkeys()) |
|
152 | return b.manifest().filesnotin(a.manifest()) | |
153 | missing.difference_update(a.manifest().iterkeys()) |
|
|||
154 | return missing |
|
|||
155 |
|
153 | |||
def _forwardcopies(a, b):
    '''find {dst@b: src@a} copy mapping where a is an ancestor of b'''

    # check for working copy
    w = None
    if b.rev() is None:
        # trace against b's first parent instead, and merge dirstate
        # copies back in at the end
        w = b
        b = w.p1()
        if a == b:
            # short-circuit to avoid issues with merge states
            return _dirstatecopies(w)

    # files might have to be traced back to the fctx parent of the last
    # one-side-only changeset, but not further back than that
    limit = _findlimit(a._repo, a.rev(), b.rev())
    if limit is None:
        limit = -1
    am = a.manifest()

    # find where new files came from
    # we currently don't try to find where old files went, too expensive
    # this means we can miss a case like 'hg rm b; hg cp a b'
    cm = {}
    missing = _computeforwardmissing(a, b)
    ancestrycontext = a._repo.changelog.ancestors([b.rev()], inclusive=True)
    for f in missing:
        fctx = b[f]
        # share one ancestry set across all traced files -- presumably a
        # cache so each _tracefile walk reuses it (TODO confirm against
        # filectx.ancestors implementation)
        fctx._ancestrycontext = ancestrycontext
        ofctx = _tracefile(fctx, am, limit)
        if ofctx:
            cm[f] = ofctx.path()

    # combine copies from dirstate if necessary
    if w is not None:
        cm = _chain(a, w, cm, _dirstatecopies(w))

    return cm
193 |
|
191 | |||
def _backwardrenames(a, b):
    """Find renames from b back to a by inverting the forward copy map.

    Even though we're not taking copies into account, 1:n rename
    situations can still exist (e.g. hg cp a b; hg mv a c). In those
    cases we arbitrarily pick one of the renames; iterating in sorted
    order makes that pick deterministic.
    """
    forward = _forwardcopies(b, a)
    renames = {}
    for dst, src in sorted(forward.iteritems()):
        if src in a:
            # source still exists in a: this was a copy, not a rename
            continue
        renames[src] = dst
    return renames
206 |
|
204 | |||
def pathcopies(x, y):
    '''find {dst@y: src@x} copy mapping for directed compare'''
    # degenerate endpoints: identical or missing contexts carry no copies
    if x == y or not x or not y:
        return {}
    anc = y.ancestor(x)
    if anc == x:
        # x is an ancestor of y: a single forward walk suffices
        return _forwardcopies(x, y)
    elif anc == y:
        # y is an ancestor of x: walk backward only
        return _backwardrenames(x, y)
    else:
        # x and y diverged: go back from x to the common ancestor, then
        # forward to y, and chain the two maps
        return _chain(x, y, _backwardrenames(x, anc), _forwardcopies(anc, y))
217 |
|
215 | |||
def _computenonoverlap(repo, m1, m2, ma):
    """Computes the files exclusive to m1 and m2.
    This is its own function so extensions can easily wrap this call to see what
    files mergecopies is about to process.
    """
    # files present on only one side (and absent from the ancestor) are
    # the rename/copy candidates
    u1 = _nonoverlap(m1, m2, ma)
    u2 = _nonoverlap(m2, m1, ma)

    if u1:
        repo.ui.debug("  unmatched files in local:\n   %s\n"
                      % "\n   ".join(u1))
    if u2:
        repo.ui.debug("  unmatched files in other:\n   %s\n"
                      % "\n   ".join(u2))
    return u1, u2
233 |
|
231 | |||
def mergecopies(repo, c1, c2, ca):
    """
    Find moves and copies between context c1 and c2 that are relevant
    for merging.

    Returns four dicts: "copy", "movewithdir", "diverge", and
    "renamedelete".

    "copy" is a mapping from destination name -> source name,
    where source is in c1 and destination is in c2 or vice-versa.

    "movewithdir" is a mapping from source name -> destination name,
    where the file at source present in one context but not the other
    needs to be moved to destination by the merge process, because the
    other context moved the directory it is in.

    "diverge" is a mapping of source name -> list of destination names
    for divergent renames.

    "renamedelete" is a mapping of source name -> list of destination
    names for files deleted in c1 that were renamed in c2 or vice-versa.
    """
    # avoid silly behavior for update from empty dir
    if not c1 or not c2 or c1 == c2:
        return {}, {}, {}, {}

    # avoid silly behavior for parent -> working dir
    if c2.node() is None and c1.node() == repo.dirstate.p1():
        return repo.dirstate.copies(), {}, {}, {}

    limit = _findlimit(repo, c1.rev(), c2.rev())
    if limit is None:
        # no common ancestor, no copies
        return {}, {}, {}, {}
    m1 = c1.manifest()
    m2 = c2.manifest()
    ma = ca.manifest()

    def makectx(f, n):
        # resolve (filename, node) to a filectx; a node that is not 20
        # bytes is not a real nodeid, so fall back to the working context
        if len(n) != 20: # in a working context?
            if c1.rev() is None:
                return c1.filectx(f)
            return c2.filectx(f)
        return repo.filectx(f, fileid=n)

    # memoize filectx construction; checkcopies may look files up repeatedly
    ctx = util.lrucachefunc(makectx)
    copy = {}
    movewithdir = {}
    fullcopy = {}
    diverge = {}

    repo.ui.debug("  searching for copies back to rev %d\n" % limit)

    u1, u2 = _computenonoverlap(repo, m1, m2, ma)

    for f in u1:
        checkcopies(ctx, f, m1, m2, ca, limit, diverge, copy, fullcopy)

    for f in u2:
        checkcopies(ctx, f, m2, m1, ca, limit, diverge, copy, fullcopy)

    renamedelete = {}
    renamedelete2 = set()
    diverge2 = set()
    # note: diverge.items() returns a list copy under Python 2, so
    # deleting from diverge inside the loop is safe
    for of, fl in diverge.items():
        if len(fl) == 1 or of in c1 or of in c2:
            del diverge[of] # not actually divergent, or not a rename
            if of not in c1 and of not in c2:
                # renamed on one side, deleted on the other side, but filter
                # out files that have been renamed and then deleted
                renamedelete[of] = [f for f in fl if f in c1 or f in c2]
                renamedelete2.update(fl) # reverse map for below
        else:
            diverge2.update(fl) # reverse map for below

    bothnew = sorted([d for d in m1 if d in m2 and d not in ma])
    if bothnew:
        repo.ui.debug("  unmatched files new in both:\n   %s\n"
                      % "\n   ".join(bothnew))
    # trace files new on both sides against throwaway dicts; only
    # matching renames feed back into the real copy map below
    bothdiverge, _copy, _fullcopy = {}, {}, {}
    for f in bothnew:
        checkcopies(ctx, f, m1, m2, ca, limit, bothdiverge, _copy, _fullcopy)
        checkcopies(ctx, f, m2, m1, ca, limit, bothdiverge, _copy, _fullcopy)
    for of, fl in bothdiverge.items():
        if len(fl) == 2 and fl[0] == fl[1]:
            copy[fl[0]] = of # not actually divergent, just matching renames

    if fullcopy and repo.ui.debugflag:
        repo.ui.debug("  all copies found (* = to merge, ! = divergent, "
                      "% = renamed and deleted):\n")
        for f in sorted(fullcopy):
            note = ""
            if f in copy:
                note += "*"
            if f in diverge2:
                note += "!"
            if f in renamedelete2:
                note += "%"
            repo.ui.debug("   src: '%s' -> dst: '%s' %s\n" % (fullcopy[f], f,
                                                              note))
    del diverge2

    if not fullcopy:
        return copy, movewithdir, diverge, renamedelete

    repo.ui.debug("  checking for directory renames\n")

    # generate a directory move map
    d1, d2 = c1.dirs(), c2.dirs()
    d1.addpath('/')
    d2.addpath('/')
    invalid = set()
    dirmove = {}

    # examine each file copy for a potential directory move, which is
    # when all the files in a directory are moved to a new directory
    for dst, src in fullcopy.iteritems():
        dsrc, ddst = _dirname(src), _dirname(dst)
        if dsrc in invalid:
            # already seen to be uninteresting
            continue
        elif dsrc in d1 and ddst in d1:
            # directory wasn't entirely moved locally
            invalid.add(dsrc)
        elif dsrc in d2 and ddst in d2:
            # directory wasn't entirely moved remotely
            invalid.add(dsrc)
        elif dsrc in dirmove and dirmove[dsrc] != ddst:
            # files from the same directory moved to two different places
            invalid.add(dsrc)
        else:
            # looks good so far
            dirmove[dsrc + "/"] = ddst + "/"

    for i in invalid:
        if i in dirmove:
            del dirmove[i]
    del d1, d2, invalid

    if not dirmove:
        return copy, movewithdir, diverge, renamedelete

    for d in dirmove:
        repo.ui.debug("   discovered dir src: '%s' -> dst: '%s'\n" %
                      (d, dirmove[d]))

    # check unaccounted nonoverlapping files against directory moves
    for f in u1 + u2:
        if f not in fullcopy:
            for d in dirmove:
                if f.startswith(d):
                    # new file added in a directory that was moved, move it
                    df = dirmove[d] + f[len(d):]
                    if df not in copy:
                        movewithdir[f] = df
                        repo.ui.debug(("   pending file src: '%s' -> "
                                       "dst: '%s'\n") % (f, df))
                    break

    return copy, movewithdir, diverge, renamedelete
394 |
|
392 | |||
def checkcopies(ctx, f, m1, m2, ca, limit, diverge, copy, fullcopy):
    """
    check possible copies of f from m1 to m2

    ctx = function accepting (filename, node) that returns a filectx.
    f = the filename to check
    m1 = the source manifest
    m2 = the destination manifest
    ca = the changectx of the common ancestor
    limit = the rev number to not search beyond
    diverge = record all diverges in this dict
    copy = record all non-divergent copies in this dict
    fullcopy = record all copies in this dict
    """

    ma = ca.manifest()

    def _related(f1, f2, limit):
        # Walk back to common ancestor to see if the two files originate
        # from the same file. Since workingfilectx's rev() is None it messes
        # up the integer comparison logic, hence the pre-step check for
        # None (f1 and f2 can only be workingfilectx's initially).

        if f1 == f2:
            return f1 # a match

        g1, g2 = f1.ancestors(), f2.ancestors()
        try:
            f1r, f2r = f1.rev(), f2.rev()

            # step off the working directory (rev None) onto the first
            # real ancestor before comparing revision numbers
            if f1r is None:
                f1 = g1.next()
            if f2r is None:
                f2 = g2.next()

            # repeatedly advance whichever side sits at the higher
            # revision until the two walks meet or pass the limit
            while True:
                f1r, f2r = f1.rev(), f2.rev()
                if f1r > f2r:
                    f1 = g1.next()
                elif f2r > f1r:
                    f2 = g2.next()
                elif f1 == f2:
                    return f1 # a match
                elif f1r == f2r or f1r < limit or f2r < limit:
                    return False # copy no longer relevant
        except StopIteration:
            # one side ran out of ancestors: the files are unrelated
            return False

    of = None
    seen = set([f])
    for oc in ctx(f, m1[f]).ancestors():
        ocr = oc.rev()
        of = oc.path()
        if of in seen:
            # check limit late - grab last rename before
            if ocr < limit:
                break
            continue
        seen.add(of)

        fullcopy[f] = of # remember for dir rename detection
        if of not in m2:
            continue # no match, keep looking
        if m2[of] == ma.get(of):
            break # no merge needed, quit early
        c2 = ctx(of, m2[of])
        cr = _related(oc, c2, ca.rev())
        if cr and (of == f or of == c2.path()): # non-divergent
            copy[f] = of
            of = None
            break

    if of in ma:
        # the candidate source exists in the ancestor but was not resolved
        # as a plain copy above: record it as a possibly divergent rename
        diverge.setdefault(of, []).append(f)
469 |
|
467 | |||
def duplicatecopies(repo, rev, fromrev, skiprev=None):
    '''reproduce copies from fromrev to rev in the dirstate

    If skiprev is specified, it's a revision that should be used to
    filter copy records. Any copies that occur between fromrev and
    skiprev will not be duplicated, even if they appear in the set of
    copies between fromrev and rev.
    '''
    if skiprev is None:
        exclude = {}
    else:
        exclude = pathcopies(repo[fromrev], repo[skiprev])
    allcopies = pathcopies(repo[fromrev], repo[rev])
    for dst, src in allcopies.iteritems():
        if dst in exclude:
            continue
        # copies.pathcopies returns backward renames, so dst might not
        # actually be in the dirstate
        if repo.dirstate[dst] in "nma":
            repo.dirstate.copy(src, dst)
@@ -1,298 +1,304 b'' | |||||
1 | # manifest.py - manifest revision class for mercurial |
|
1 | # manifest.py - manifest revision class for mercurial | |
2 | # |
|
2 | # | |
3 | # Copyright 2005-2007 Matt Mackall <mpm@selenic.com> |
|
3 | # Copyright 2005-2007 Matt Mackall <mpm@selenic.com> | |
4 | # |
|
4 | # | |
5 | # This software may be used and distributed according to the terms of the |
|
5 | # This software may be used and distributed according to the terms of the | |
6 | # GNU General Public License version 2 or any later version. |
|
6 | # GNU General Public License version 2 or any later version. | |
7 |
|
7 | |||
8 | from i18n import _ |
|
8 | from i18n import _ | |
9 | import mdiff, parsers, error, revlog, util |
|
9 | import mdiff, parsers, error, revlog, util | |
10 | import array, struct |
|
10 | import array, struct | |
11 |
|
11 | |||
class manifestdict(dict):
    """A manifest: a mapping of filename -> nodeid.

    Per-file flags (e.g. executable/symlink markers) are kept in a
    separate _flags dict rather than packed into the values, so plain
    dict operations work directly on the nodeids.
    """
    def __init__(self):
        # filename -> flags string; files with no entry have no flags
        self._flags = {}
    def __setitem__(self, k, v):
        # a None nodeid would corrupt the manifest; fail fast here
        assert v is not None
        dict.__setitem__(self, k, v)
    def flags(self, f):
        # return the flags for path f; "" when no flags are set
        return self._flags.get(f, "")
    def setflag(self, f, flags):
        """Set the flags (symlink, executable) for path f."""
        self._flags[f] = flags
    def copy(self):
        # shallow copy: entries and the flags mapping are duplicated,
        # but keys/values themselves are shared
        copy = manifestdict()
        dict.__init__(copy, self)
        copy._flags = dict.copy(self._flags)
        return copy
    def intersectfiles(self, files):
        '''make a new manifestdict with the intersection of self with files

        The algorithm assumes that files is much smaller than self.'''
        ret = manifestdict()
        for fn in files:
            if fn in self:
                ret[fn] = self[fn]
                # only carry over a flags entry when one is actually set
                flags = self._flags.get(fn, None)
                if flags:
                    ret._flags[fn] = flags
        return ret

    def filesnotin(self, m2):
        '''Set of files in this manifest that are not in the other'''
        files = set(self.iterkeys())
        files.difference_update(m2.iterkeys())
        return files

    def matches(self, match):
        '''generate a new manifest filtered by the match argument'''
        if match.always():
            # matcher accepts everything: a plain copy avoids filtering
            return self.copy()

        files = match.files()
        # exact file lists (or pattern-free lists fully present here) can
        # use the cheaper intersection instead of testing every entry
        if (match.matchfn == match.exact or
            (not match.anypats() and util.all(fn in self for fn in files))):
            return self.intersectfiles(files)

        # general case: copy, then drop entries the matcher rejects
        # (iterate over keys() since m is mutated inside the loop)
        m = self.copy()
        for fn in m.keys():
            if not match(fn):
                del m[fn]
        return m

    def diff(self, m2, clean=False):
        '''Finds changes between the current manifest and m2.

        Args:
          m2: the manifest to which this manifest should be compared.
          clean: if true, include files unchanged between these manifests
                 with a None value in the returned dictionary.

        The result is returned as a dict with filename as key and
        values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
        nodeid in the current/other manifest and fl1/fl2 is the flag
        in the current/other manifest. Where the file does not exist,
        the nodeid will be None and the flags will be the empty
        string.
        '''
        diff = {}

        # first pass: files present in self (changed, removed, or clean)
        for fn, n1 in self.iteritems():
            fl1 = self._flags.get(fn, '')
            n2 = m2.get(fn, None)
            fl2 = m2._flags.get(fn, '')
            if n2 is None:
                # absent files report empty flags regardless of any
                # stale _flags entry in m2
                fl2 = ''
            if n1 != n2 or fl1 != fl2:
                diff[fn] = ((n1, fl1), (n2, fl2))
            elif clean:
                diff[fn] = None

        # second pass: files only present in m2 (added)
        for fn, n2 in m2.iteritems():
            if fn not in self:
                fl2 = m2._flags.get(fn, '')
                diff[fn] = ((None, ''), (n2, fl2))

        return diff

    def text(self):
        """Get the full data of this manifest as a bytestring."""
        fl = sorted(self)
        _checkforbidden(fl)

        hex, flags = revlog.hex, self.flags
        # if this is changed to support newlines in filenames,
        # be sure to check the templates/ dir again (especially *-raw.tmpl)
        return ''.join("%s\0%s%s\n" % (f, hex(self[f]), flags(f)) for f in fl)

    def fastdelta(self, base, changes):
        """Given a base manifest text as an array.array and a list of changes
        relative to that text, compute a delta that can be used by revlog.

        changes is a sorted list of (filename, todelete) pairs; returns
        (arraytext, deltatext) — the patched text and the raw delta.
        """
        delta = []
        dstart = None
        dend = None
        dline = [""]
        start = 0
        # zero copy representation of base as a buffer
        addbuf = util.buffer(base)

        # start with a readonly loop that finds the offset of
        # each line and creates the deltas
        for f, todelete in changes:
            # bs will either be the index of the item or the insert point
            start, end = _msearch(addbuf, f, start)
            if not todelete:
                # replacement/insertion line for this file
                l = "%s\0%s%s\n" % (f, revlog.hex(self[f]), self.flags(f))
            else:
                if start == end:
                    # item we want to delete was not found, error out
                    raise AssertionError(
                            _("failed to remove %s from manifest") % f)
                l = ""
            if dstart is not None and dstart <= start and dend >= start:
                # this change is adjacent/overlapping: extend the current
                # accumulated delta hunk instead of starting a new one
                if dend < end:
                    dend = end
                if l:
                    dline.append(l)
            else:
                # flush the previous hunk and start a fresh one
                if dstart is not None:
                    delta.append([dstart, dend, "".join(dline)])
                dstart = start
                dend = end
                dline = [l]

        # flush the final pending hunk
        if dstart is not None:
            delta.append([dstart, dend, "".join(dline)])
        # apply the delta to the base, and get a delta for addrevision
        deltatext, arraytext = _addlistdelta(base, delta)
        return arraytext, deltatext
144 |
|
150 | |||
def _msearch(m, s, lo=0, hi=None):
    '''return a tuple (start, end) that says where to find s within m.

    If the string is found m[start:end] are the line containing
    that string.  If start == end the string was not found and
    they indicate the proper sorted insertion point.

    m should be a buffer or a string
    s is a string'''
    def advance(i, c):
        # scan forward from i to the next occurrence of character c
        while i < lenm and m[i] != c:
            i += 1
        return i
    if not s:
        return (lo, lo)
    lenm = len(m)
    if not hi:
        hi = lenm
    # binary search over lines of the form "<file>\0<40-hex-sha1><flags>\n";
    # mid may land anywhere inside a line, so back up to its start first
    while lo < hi:
        mid = (lo + hi) // 2
        start = mid
        while start > 0 and m[start - 1] != '\n':
            start -= 1
        end = advance(start, '\0')
        if m[start:end] < s:
            # we know that after the null there are 40 bytes of sha1
            # this translates to the bisect lo = mid + 1
            lo = advance(end + 40, '\n') + 1
        else:
            # this translates to the bisect hi = mid
            hi = start
    # lo now points at the start of the candidate line; compare its filename
    end = advance(lo, '\0')
    found = m[lo:end]
    if s == found:
        # we know that after the null there are 40 bytes of sha1
        end = advance(end + 40, '\n')
        return (lo, end + 1)
    else:
        return (lo, lo)
184 |
|
190 | |||
185 | def _checkforbidden(l): |
|
191 | def _checkforbidden(l): | |
186 | """Check filenames for illegal characters.""" |
|
192 | """Check filenames for illegal characters.""" | |
187 | for f in l: |
|
193 | for f in l: | |
188 | if '\n' in f or '\r' in f: |
|
194 | if '\n' in f or '\r' in f: | |
189 | raise error.RevlogError( |
|
195 | raise error.RevlogError( | |
190 | _("'\\n' and '\\r' disallowed in filenames: %r") % f) |
|
196 | _("'\\n' and '\\r' disallowed in filenames: %r") % f) | |
191 |
|
197 | |||
192 |
|
198 | |||
# apply the changes collected during the bisect loop to our addlist
# return a delta suitable for addrevision
def _addlistdelta(addlist, x):
    # x is a list of [start, end, content] patches against addlist;
    # for large addlist arrays, building a new array is cheaper
    # than repeatedly modifying the existing one
    currentposition = 0
    newaddlist = array.array('c')

    for start, end, content in x:
        # copy the untouched span, then splice in the replacement text
        newaddlist += addlist[currentposition:start]
        if content:
            newaddlist += array.array('c', content)

        currentposition = end

    # tail of the base after the last patched region
    newaddlist += addlist[currentposition:]

    # delta wire format: ">lll" (start, end, len(content)) header
    # followed by the new content, one record per patch
    deltatext = "".join(struct.pack(">lll", start, end, len(content))
                        + content for start, end, content in x)
    return deltatext, newaddlist
213 |
|
219 | |||
def _parse(lines):
    """Parse raw manifest text into a populated manifestdict.

    Delegates to parsers.parse_manifest, which fills both the dict
    (filename -> nodeid) and its _flags mapping in one pass.
    """
    mfdict = manifestdict()
    parsers.parse_manifest(mfdict, mfdict._flags, lines)
    return mfdict
218 |
|
224 | |||
class manifest(revlog.revlog):
    """The manifest revlog (00manifest.i), with a small LRU cache of
    parsed manifests keyed by node."""
    def __init__(self, opener):
        # During normal operations, we expect to deal with not more than four
        # revs at a time (such as during commit --amend). When rebasing large
        # stacks of commits, the number can go up, hence the config knob below.
        cachesize = 4
        opts = getattr(opener, 'options', None)
        if opts is not None:
            cachesize = opts.get('manifestcachesize', cachesize)
        # node -> (manifestdict, array.array of the raw manifest text)
        self._mancache = util.lrucachedict(cachesize)
        revlog.revlog.__init__(self, opener, "00manifest.i")

    def readdelta(self, node):
        """Parse only the entries changed relative to the revlog delta
        parent of node."""
        r = self.rev(node)
        return _parse(mdiff.patchtext(self.revdiff(self.deltaparent(r), r)))

    def readfast(self, node):
        '''use the faster of readdelta or read'''
        r = self.rev(node)
        deltaparent = self.deltaparent(r)
        # a delta stored against one of node's parents can be parsed
        # directly; otherwise reconstruct and parse the full text
        if deltaparent != revlog.nullrev and deltaparent in self.parentrevs(r):
            return self.readdelta(node)
        return self.read(node)

    def read(self, node):
        """Return the parsed manifestdict for node, using the cache."""
        if node == revlog.nullid:
            return manifestdict() # don't upset local cache
        if node in self._mancache:
            return self._mancache[node][0]
        text = self.revision(node)
        # keep the raw text too: add() feeds it to fastdelta as an array
        arraytext = array.array('c', text)
        m = _parse(text)
        self._mancache[node] = (m, arraytext)
        return m

    def find(self, node, f):
        '''look up entry for a single file efficiently.
        return (node, flags) pair if found, (None, None) if not.'''
        if node in self._mancache:
            m = self._mancache[node][0]
            return m.get(f), m.flags(f)
        # not cached: binary-search the raw text instead of parsing it all
        text = self.revision(node)
        start, end = _msearch(text, f)
        if start == end:
            return None, None
        l = text[start:end]
        f, n = l.split('\0')
        # line layout is "<file>\0<40 hex sha1 chars><flags>\n"
        return revlog.bin(n[:40]), n[40:-1]

    def add(self, m, transaction, link, p1, p2, added, removed):
        """Store manifest m as a new revision and return its node.

        added/removed are the (sorted) filename lists changed relative
        to the first parent p1.
        """
        if p1 in self._mancache:
            # If our first parent is in the manifest cache, we can
            # compute a delta here using properties we know about the
            # manifest up-front, which may save time later for the
            # revlog layer.

            _checkforbidden(added)
            # combine the changed lists into one list for sorting
            work = [(x, False) for x in added]
            work.extend((x, True) for x in removed)
            # this could use heapq.merge() (from Python 2.6+) or equivalent
            # since the lists are already sorted
            work.sort()

            arraytext, deltatext = m.fastdelta(self._mancache[p1][1], work)
            cachedelta = self.rev(p1), deltatext
            text = util.buffer(arraytext)
        else:
            # The first parent manifest isn't already loaded, so we'll
            # just encode a fulltext of the manifest and pass that
            # through to the revlog layer, and let it handle the delta
            # process.
            text = m.text()
            arraytext = array.array('c', text)
            cachedelta = None

        n = self.addrevision(text, transaction, link, p1, p2, cachedelta)
        self._mancache[n] = (m, arraytext)

        return n
General Comments 0
You need to be logged in to leave comments.
Login now