The requested changes are too big and content was truncated.
@@ -0,0 +1,223 b'' | |||
|
1 | # branchmap.py - logic to compute, maintain and store the branchmap for a local repo | |
|
2 | # | |
|
3 | # Copyright 2005-2007 Matt Mackall <mpm@selenic.com> | |
|
4 | # | |
|
5 | # This software may be used and distributed according to the terms of the | |
|
6 | # GNU General Public License version 2 or any later version. | |
|
7 | ||
|
8 | from node import bin, hex, nullid, nullrev | |
|
9 | import encoding | |
|
10 | import util, repoview | |
|
11 | ||
|
12 | def _filename(repo): | |
|
13 | """name of a branchcache file for a given repo or repoview""" | |
|
14 | filename = "cache/branchheads" | |
|
15 | if repo.filtername: | |
|
16 | filename = '%s-%s' % (filename, repo.filtername) | |
|
17 | return filename | |
|
18 | ||
|
19 | def read(repo): | |
|
20 | try: | |
|
21 | f = repo.opener(_filename(repo)) | |
|
22 | lines = f.read().split('\n') | |
|
23 | f.close() | |
|
24 | except (IOError, OSError): | |
|
25 | return None | |
|
26 | ||
|
27 | try: | |
|
28 | cachekey = lines.pop(0).split(" ", 2) | |
|
29 | last, lrev = cachekey[:2] | |
|
30 | last, lrev = bin(last), int(lrev) | |
|
31 | filteredhash = None | |
|
32 | if len(cachekey) > 2: | |
|
33 | filteredhash = bin(cachekey[2]) | |
|
34 | partial = branchcache(tipnode=last, tiprev=lrev, | |
|
35 | filteredhash=filteredhash) | |
|
36 | if not partial.validfor(repo): | |
|
37 | # invalidate the cache | |
|
38 | raise ValueError('tip differs') | |
|
39 | for l in lines: | |
|
40 | if not l: | |
|
41 | continue | |
|
42 | node, label = l.split(" ", 1) | |
|
43 | label = encoding.tolocal(label.strip()) | |
|
44 | if not node in repo: | |
|
45 | raise ValueError('node %s does not exist' % node) | |
|
46 | partial.setdefault(label, []).append(bin(node)) | |
|
47 | except KeyboardInterrupt: | |
|
48 | raise | |
|
49 | except Exception, inst: | |
|
50 | if repo.ui.debugflag: | |
|
51 | msg = 'invalid branchheads cache' | |
|
52 | if repo.filtername is not None: | |
|
53 | msg += ' (%s)' % repo.filtername | |
|
54 | msg += ': %s\n' | |
|
55 | repo.ui.warn(msg % inst) | |
|
56 | partial = None | |
|
57 | return partial | |
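For reference, the on-disk format parsed here (and produced by branchcache.write below) is a single cache-key line, hex(tipnode), tiprev and an optional hex(filteredhash) separated by spaces, followed by one "hex(node) branchname" line per head. A hypothetical .hg/cache/branchheads-served file, with the 40-digit hex nodes abbreviated for readability, might look like:

    1f0dc64a8b... 42 9fdc738b2e...
    8a2c5f3e1d... default
    d4b17ac920... default
    33c8e0f7a6... stable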
|
58 | ||
|
59 | ||
|
60 | ||
|
61 | def updatecache(repo): | |
|
62 | cl = repo.changelog | |
|
63 | filtername = repo.filtername | |
|
64 | partial = repo._branchcaches.get(filtername) | |
|
65 | ||
|
66 | revs = [] | |
|
67 | if partial is None or not partial.validfor(repo): | |
|
68 | partial = read(repo) | |
|
69 | if partial is None: | |
|
70 | subsetname = repoview.subsettable.get(filtername) | |
|
71 | if subsetname is None: | |
|
72 | partial = branchcache() | |
|
73 | else: | |
|
74 | subset = repo.filtered(subsetname) | |
|
75 | partial = subset.branchmap().copy() | |
|
76 | extrarevs = subset.changelog.filteredrevs - cl.filteredrevs | |
|
77 | revs.extend(r for r in extrarevs if r <= partial.tiprev) | |
|
78 | revs.extend(cl.revs(start=partial.tiprev + 1)) | |
|
79 | if revs: | |
|
80 | partial.update(repo, revs) | |
|
81 | partial.write(repo) | |
|
82 | assert partial.validfor(repo), filtername | |
|
83 | repo._branchcaches[repo.filtername] = partial | |
|
84 | ||
|
85 | class branchcache(dict): | |
|
86 | """A dict like object that hold branches heads cache""" | |
|
87 | ||
|
88 | def __init__(self, entries=(), tipnode=nullid, tiprev=nullrev, | |
|
89 | filteredhash=None): | |
|
90 | super(branchcache, self).__init__(entries) | |
|
91 | self.tipnode = tipnode | |
|
92 | self.tiprev = tiprev | |
|
93 | self.filteredhash = filteredhash | |
|
94 | ||
|
95 | def _hashfiltered(self, repo): | |
|
96 | """build hash of revision filtered in the current cache | |
|
97 | ||
|
98 | Tracking tipnode and tiprev is not enough to ensure the validity of the | |
|
99 | cache, as they do not help distinguish caches that ignored different | |
|
100 | revisions below tiprev. | |
|
101 | ||
|
102 | To detect such a difference, we build a hash of all ignored revisions. | |
|
103 | """ | |
|
104 | cl = repo.changelog | |
|
105 | if not cl.filteredrevs: | |
|
106 | return None | |
|
107 | key = None | |
|
108 | revs = sorted(r for r in cl.filteredrevs if r <= self.tiprev) | |
|
109 | if revs: | |
|
110 | s = util.sha1() | |
|
111 | for rev in revs: | |
|
112 | s.update('%s;' % rev) | |
|
113 | key = s.digest() | |
|
114 | return key | |
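util.sha1 is essentially a wrapper around hashlib's SHA-1, so the key computation can be sketched standalone; note how two views with the same tiprev but different filtered sets below it get different keys (a toy illustration, not Mercurial API):

    import hashlib

    def filteredkey(filteredrevs, tiprev):
        # mirrors _hashfiltered: hash the sorted filtered revs <= tiprev
        revs = sorted(r for r in filteredrevs if r <= tiprev)
        if not revs:
            return None
        s = hashlib.sha1()
        for rev in revs:
            s.update('%s;' % rev)
        return s.digest()

    print filteredkey(set([2, 5]), 10) == filteredkey(set([2, 6]), 10)  # False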
|
115 | ||
|
116 | def validfor(self, repo): | |
|
117 | """Is the cache content valide regarding a repo | |
|
118 | ||
|
119 | - False when cached tipnode are unknown or if we detect a strip. | |
|
120 | - True when cache is up to date or a subset of current repo.""" | |
|
121 | try: | |
|
122 | return ((self.tipnode == repo.changelog.node(self.tiprev)) | |
|
123 | and (self.filteredhash == self._hashfiltered(repo))) | |
|
124 | except IndexError: | |
|
125 | return False | |
|
126 | ||
|
127 | def copy(self): | |
|
128 | """return an deep copy of the branchcache object""" | |
|
129 | return branchcache(self, self.tipnode, self.tiprev, self.filteredhash) | |
|
130 | ||
|
131 | def write(self, repo): | |
|
132 | try: | |
|
133 | f = repo.opener(_filename(repo), "w", atomictemp=True) | |
|
134 | cachekey = [hex(self.tipnode), str(self.tiprev)] | |
|
135 | if self.filteredhash is not None: | |
|
136 | cachekey.append(hex(self.filteredhash)) | |
|
137 | f.write(" ".join(cachekey) + '\n') | |
|
138 | for label, nodes in sorted(self.iteritems()): | |
|
139 | for node in nodes: | |
|
140 | f.write("%s %s\n" % (hex(node), encoding.fromlocal(label))) | |
|
141 | f.close() | |
|
142 | except (IOError, OSError, util.Abort): | |
|
143 | # Abort may be raised by a read-only opener | |
|
144 | pass | |
|
145 | ||
|
146 | def update(self, repo, revgen): | |
|
147 | """Given a branchhead cache, self, that may have extra nodes or be | |
|
148 | missing heads, and a generator of revisions that is at least a superset | |
|
149 | of the missing heads, this function updates self to be correct. | |
|
150 | """ | |
|
151 | cl = repo.changelog | |
|
152 | # collect new branch entries | |
|
153 | newbranches = {} | |
|
154 | getbranch = cl.branch | |
|
155 | for r in revgen: | |
|
156 | newbranches.setdefault(getbranch(r), []).append(cl.node(r)) | |
|
157 | # if older branchheads are reachable from new ones, they aren't | |
|
158 | # really branchheads. Note checking parents is insufficient: | |
|
159 | # 1 (branch a) -> 2 (branch b) -> 3 (branch a) | |
|
160 | for branch, newnodes in newbranches.iteritems(): | |
|
161 | bheads = self.setdefault(branch, []) | |
|
162 | # Remove candidate heads that no longer are in the repo (e.g., as | |
|
163 | # the result of a strip that just happened). Avoid using 'node in | |
|
164 | # self' here because that dives down into branchcache code somewhat | |
|
165 | # recursively. | |
|
166 | bheadrevs = [cl.rev(node) for node in bheads | |
|
167 | if cl.hasnode(node)] | |
|
168 | newheadrevs = [cl.rev(node) for node in newnodes | |
|
169 | if cl.hasnode(node)] | |
|
170 | ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs) | |
|
171 | # Remove duplicates - nodes that are in newheadrevs and are already | |
|
172 | # in bheadrevs. This can happen if you strip a node whose parent | |
|
173 | # was already a head (because they're on different branches). | |
|
174 | bheadrevs = sorted(set(bheadrevs).union(newheadrevs)) | |
|
175 | ||
|
176 | # Starting from tip means fewer passes over reachable. If we know | |
|
177 | # the new candidates are not ancestors of existing heads, we don't | |
|
178 | # have to examine ancestors of existing heads | |
|
179 | if ctxisnew: | |
|
180 | iterrevs = sorted(newheadrevs) | |
|
181 | else: | |
|
182 | iterrevs = list(bheadrevs) | |
|
183 | ||
|
184 | # This loop prunes out two kinds of heads - heads that are | |
|
185 | # superseded by a head in newheadrevs, and newheadrevs that are not | |
|
186 | # heads because an existing head is their descendant. | |
|
187 | while iterrevs: | |
|
188 | latest = iterrevs.pop() | |
|
189 | if latest not in bheadrevs: | |
|
190 | continue | |
|
191 | ancestors = set(cl.ancestors([latest], | |
|
192 | bheadrevs[0])) | |
|
193 | if ancestors: | |
|
194 | bheadrevs = [b for b in bheadrevs if b not in ancestors] | |
|
195 | self[branch] = [cl.node(rev) for rev in bheadrevs] | |
|
196 | tiprev = max(bheadrevs) | |
|
197 | if tiprev > self.tiprev: | |
|
198 | self.tipnode = cl.node(tiprev) | |
|
199 | self.tiprev = tiprev | |
|
200 | ||
|
201 | # There may be branches that cease to exist when the last commit in the | |
|
202 | # branch was stripped. This code filters them out. Note that the | |
|
203 | # branch that ceased to exist may not be in newbranches because | |
|
204 | # newbranches is the set of candidate heads, which, when you strip the | |
|
205 | # last commit in a branch, will be on the parent branch. | |
|
206 | droppednodes = [] | |
|
207 | for branch in self.keys(): | |
|
208 | nodes = [head for head in self[branch] | |
|
209 | if cl.hasnode(head)] | |
|
210 | if not nodes: | |
|
211 | droppednodes.extend(nodes) | |
|
212 | del self[branch] | |
|
213 | if ((not self.validfor(repo)) or (self.tipnode in droppednodes)): | |
|
214 | ||
|
215 | # cache key are not valid anymore | |
|
216 | self.tipnode = nullid | |
|
217 | self.tiprev = nullrev | |
|
218 | for heads in self.values(): | |
|
219 | tiprev = max(cl.rev(node) for node in heads) | |
|
220 | if tiprev > self.tiprev: | |
|
221 | self.tipnode = cl.node(tiprev) | |
|
222 | self.tiprev = tiprev | |
|
223 | self.filteredhash = self._hashfiltered(repo) |
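The head-pruning loop in update is easiest to follow on a toy case; a minimal standalone sketch (plain dicts and a toy ancestors helper stand in for the changelog) of how an old head gets superseded:

    # toy DAG: rev -> parents; old branch head 1 gains a descendant 3
    parents = {0: [], 1: [0], 2: [1], 3: [2]}

    def ancestors(rev, stop):
        # ancestors of rev with revision number >= stop (toy stand-in)
        seen, stack = set(), list(parents[rev])
        while stack:
            r = stack.pop()
            if r >= stop and r not in seen:
                seen.add(r)
                stack.extend(parents[r])
        return seen

    bheadrevs = sorted(set([1]) | set([3]))   # old head plus new candidate
    iterrevs = list(bheadrevs)
    while iterrevs:
        latest = iterrevs.pop()
        if latest not in bheadrevs:
            continue
        anc = ancestors(latest, bheadrevs[0])
        if anc:
            bheadrevs = [b for b in bheadrevs if b not in anc]
    print bheadrevs   # [3]: rev 1 is an ancestor of 3 and is pruned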
@@ -0,0 +1,219 b'' | |||
|
1 | # repoview.py - Filtered view of a localrepo object | |
|
2 | # | |
|
3 | # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org> | |
|
4 | # Logilab SA <contact@logilab.fr> | |
|
5 | # | |
|
6 | # This software may be used and distributed according to the terms of the | |
|
7 | # GNU General Public License version 2 or any later version. | |
|
8 | ||
|
9 | import copy | |
|
10 | import phases | |
|
11 | import util | |
|
12 | import obsolete, bookmarks, revset | |
|
13 | ||
|
14 | ||
|
15 | def hideablerevs(repo): | |
|
16 | """Revisions candidates to be hidden | |
|
17 | ||
|
18 | This is a standalone function to help extensions to wrap it.""" | |
|
19 | return obsolete.getrevs(repo, 'obsolete') | |
|
20 | ||
|
21 | def computehidden(repo): | |
|
22 | """compute the set of hidden revision to filter | |
|
23 | ||
|
24 | During most operation hidden should be filtered.""" | |
|
25 | assert not repo.changelog.filteredrevs | |
|
26 | hideable = hideablerevs(repo) | |
|
27 | if hideable: | |
|
28 | cl = repo.changelog | |
|
29 | firsthideable = min(hideable) | |
|
30 | revs = cl.revs(start=firsthideable) | |
|
31 | blockers = [r for r in revset._children(repo, revs, hideable) | |
|
32 | if r not in hideable] | |
|
33 | for par in repo[None].parents(): | |
|
34 | blockers.append(par.rev()) | |
|
35 | for bm in bookmarks.listbookmarks(repo).values(): | |
|
36 | blockers.append(repo[bm].rev()) | |
|
37 | blocked = cl.ancestors(blockers, inclusive=True) | |
|
38 | return frozenset(r for r in hideable if r not in blocked) | |
|
39 | return frozenset() | |
|
40 | ||
|
41 | def computeunserved(repo): | |
|
42 | """compute the set of revision that should be filtered when used a server | |
|
43 | ||
|
44 | Secret and hidden changeset should not pretend to be here.""" | |
|
45 | assert not repo.changelog.filteredrevs | |
|
46 | # fast path in the simple case, to avoid the impact of non-optimised code | |
|
47 | hiddens = filterrevs(repo, 'visible') | |
|
48 | if phases.hassecret(repo): | |
|
49 | cl = repo.changelog | |
|
50 | secret = phases.secret | |
|
51 | getphase = repo._phasecache.phase | |
|
52 | first = min(cl.rev(n) for n in repo._phasecache.phaseroots[secret]) | |
|
53 | revs = cl.revs(start=first) | |
|
54 | secrets = set(r for r in revs if getphase(repo, r) >= secret) | |
|
55 | return frozenset(hiddens | secrets) | |
|
56 | else: | |
|
57 | return hiddens | |
|
58 | return frozenset() | |
|
59 | ||
|
60 | def computemutable(repo): | |
|
61 | """compute the set of revision that should be filtered when used a server | |
|
62 | ||
|
63 | Secret and hidden changeset should not pretend to be here.""" | |
|
64 | assert not repo.changelog.filteredrevs | |
|
65 | # fast check to avoid revset call on huge repo | |
|
66 | if util.any(repo._phasecache.phaseroots[1:]): | |
|
67 | getphase = repo._phasecache.phase | |
|
68 | maymutable = filterrevs(repo, 'base') | |
|
69 | return frozenset(r for r in maymutable if getphase(repo, r)) | |
|
70 | return frozenset() | |
|
71 | ||
|
72 | def computeimpactable(repo): | |
|
73 | """Everything impactable by mutable revision | |
|
74 | ||
|
75 | The mutable filter still has some chance of being invalidated. This will | |
|
76 | happen when: | |
|
77 | ||
|
78 | - you garbage collect hidden changesets, | |
|
79 | - public phase is moved backward, | |
|
80 | - something is changed in the filtering (this could be fixed) | |
|
81 | ||
|
82 | This filters out any mutable changeset and any public changeset that may be | |
|
83 | impacted by something happening to a mutable revision. | |
|
84 | ||
|
85 | This is achieved by filtering out everything with a revision number equal | |
|
86 | to or higher than that of the first mutable changeset.""" | |
|
87 | assert not repo.changelog.filteredrevs | |
|
88 | cl = repo.changelog | |
|
89 | firstmutable = len(cl) | |
|
90 | for roots in repo._phasecache.phaseroots[1:]: | |
|
91 | if roots: | |
|
92 | firstmutable = min(firstmutable, min(cl.rev(r) for r in roots)) | |
|
93 | # protect from nullrev root | |
|
94 | firstmutable = max(0, firstmutable) | |
|
95 | return frozenset(xrange(firstmutable, len(cl))) | |
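Concretely (toy numbers): if the changelog holds revisions 0 through 9 and the earliest mutable root is rev 6, this returns frozenset(xrange(6, 10)), i.e. revisions 6 to 9 are filtered.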
|
96 | ||
|
97 | # functions to compute filtered sets | |
|
98 | filtertable = {'visible': computehidden, | |
|
99 | 'served': computeunserved, | |
|
100 | 'immutable': computemutable, | |
|
101 | 'base': computeimpactable} | |
|
102 | ### Nearest subset relation | |
|
103 | # Nearest subset of filter X is a filter Y so that: | |
|
104 | # * Y is included in X, | |
|
105 | # * X - Y is as small as possible. | |
|
106 | # This creates an ordering used for branchmap purposes. | |
|
107 | # The ordering may be partial. | |
|
108 | subsettable = {None: 'visible', | |
|
109 | 'visible': 'served', | |
|
110 | 'served': 'immutable', | |
|
111 | 'immutable': 'base'} | |
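As a small illustration, walking subsettable from the unfiltered view (key None) yields the chain visible, served, immutable, base; this is the fallback order branchmap.updatecache uses to seed a missing cache from its nearest subset:

    name = None
    while name in subsettable:
        name = subsettable[name]
        print name    # visible, served, immutable, base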
|
112 | ||
|
113 | def filterrevs(repo, filtername): | |
|
114 | """returns set of filtered revision for this filter name""" | |
|
115 | if filtername not in repo.filteredrevcache: | |
|
116 | func = filtertable[filtername] | |
|
117 | repo.filteredrevcache[filtername] = func(repo.unfiltered()) | |
|
118 | return repo.filteredrevcache[filtername] | |
|
119 | ||
|
120 | class repoview(object): | |
|
121 | """Provide a read/write view of a repo through a filtered changelog | |
|
122 | ||
|
123 | This object is used to access a filtered version of a repository without | |
|
124 | altering the original repository object itself. We can not alter the | |
|
125 | original object for two main reasons: | |
|
126 | - It would prevent the use of a repo with multiple filters at the same | |
|
127 | time, in particular when multiple threads are involved. | |
|
128 | - It would make the scope of the filtering harder to control. | |
|
129 | ||
|
130 | This object behaves very much like the original repository. All attribute | |
|
131 | operations are done on the original repository: | |
|
132 | - An access to `repoview.someattr` actually returns `repo.someattr`, | |
|
133 | - A write to `repoview.someattr` actually sets value of `repo.someattr`, | |
|
134 | - A deletion of `repoview.someattr` actually drops `someattr` | |
|
135 | from `repo.__dict__`. | |
|
136 | ||
|
137 | The only exception is the `changelog` property. It is overridden to return | |
|
138 | a (surface) copy of `repo.changelog` with some revisions filtered. The | |
|
139 | `filtername` attribute of the view controls the revisions that need to be | |
|
140 | filtered. (The fact that the changelog is copied is an implementation detail.) | |
|
141 | ||
|
142 | Unlike attributes, this object intercepts all method calls. This means that | |
|
143 | all methods are run on the `repoview` object with the filtered `changelog` | |
|
144 | property. For this purpose the simple `repoview` class must be mixed with | |
|
145 | the actual class of the repository. This ensures that the resulting | |
|
146 | `repoview` object has the very same methods as the repo object. This | |
|
147 | leads to the property below. | |
|
148 | ||
|
149 | repoview.method() --> repo.__class__.method(repoview) | |
|
150 | ||
|
151 | The inheritance has to be done dynamically because `repo` can be of any | |
|
152 | subclass of `localrepo`, e.g. `bundlerepo` or `httprepo`. | |
|
153 | """ | |
|
154 | ||
|
155 | def __init__(self, repo, filtername): | |
|
156 | object.__setattr__(self, '_unfilteredrepo', repo) | |
|
157 | object.__setattr__(self, 'filtername', filtername) | |
|
158 | object.__setattr__(self, '_clcachekey', None) | |
|
159 | object.__setattr__(self, '_clcache', None) | |
|
160 | ||
|
161 | # not a cached property on purpose; we shall implement a proper cache later | |
|
162 | @property | |
|
163 | def changelog(self): | |
|
164 | """return a filtered version of the changeset | |
|
165 | ||
|
166 | this changelog must not be used for writing""" | |
|
167 | # some cache may be implemented later | |
|
168 | unfi = self._unfilteredrepo | |
|
169 | unfichangelog = unfi.changelog | |
|
170 | revs = filterrevs(unfi, self.filtername) | |
|
171 | cl = self._clcache | |
|
172 | newkey = (len(unfichangelog), unfichangelog.tip(), hash(revs)) | |
|
173 | if cl is not None: | |
|
174 | # we need to check curkey too for some obscure reason. | |
|
175 | # MQ tests show a corruption of the underlying repo (in _clcache) | |
|
176 | # without a change in the cache key. | |
|
177 | oldfilter = cl.filteredrevs | |
|
178 | try: | |
|
179 | cl.filteredrevs = () # disable filtering for tip | |
|
180 | curkey = (len(cl), cl.tip(), hash(oldfilter)) | |
|
181 | finally: | |
|
182 | cl.filteredrevs = oldfilter | |
|
183 | if newkey != self._clcachekey or newkey != curkey: | |
|
184 | cl = None | |
|
185 | # could have been made None by the previous if | |
|
186 | if cl is None: | |
|
187 | cl = copy.copy(unfichangelog) | |
|
188 | cl.filteredrevs = revs | |
|
189 | object.__setattr__(self, '_clcache', cl) | |
|
190 | object.__setattr__(self, '_clcachekey', newkey) | |
|
191 | return cl | |
|
192 | ||
|
193 | def unfiltered(self): | |
|
194 | """Return an unfiltered version of a repo""" | |
|
195 | return self._unfilteredrepo | |
|
196 | ||
|
197 | def filtered(self, name): | |
|
198 | """Return a filtered version of a repository""" | |
|
199 | if name == self.filtername: | |
|
200 | return self | |
|
201 | return self.unfiltered().filtered(name) | |
|
202 | ||
|
203 | # all attribute accesses are forwarded to the proxied repo | |
|
204 | def __getattr__(self, attr): | |
|
205 | return getattr(self._unfilteredrepo, attr) | |
|
206 | ||
|
207 | def __setattr__(self, attr, value): | |
|
208 | return setattr(self._unfilteredrepo, attr, value) | |
|
209 | ||
|
210 | def __delattr__(self, attr): | |
|
211 | return delattr(self._unfilteredrepo, attr) | |
|
212 | ||
|
213 | # The `requirements` attribute is initialized during __init__. But | |
|
214 | # __getattr__ won't be called, as it also exists on the class. We need | |
|
215 | # explicit forwarding to the main repo here. | |
|
216 | @property | |
|
217 | def requirements(self): | |
|
218 | return self._unfilteredrepo.requirements | |
|
219 |
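The dynamic mixing described in the class docstring is performed by the caller (in localrepo, not part of this diff); a minimal sketch of the idea, assuming repo is some localrepo instance:

    # throwaway class whose MRO is (repoview, repo's real class):
    # repoview.__init__ and its changelog property win, while every other
    # method still resolves against the repository's actual class
    proxycls = type('filteredrepo', (repoview, repo.__class__), {})
    view = proxycls(repo, 'visible')
    assert view.unfiltered() is repo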
@@ -0,0 +1,8 b'' | |||
|
1 | <entry> | |
|
2 | <title>{branch|escape}</title> | |
|
3 | <link rel="alternate" href="{urlbase}{url}rev/{node|short}"/> | |
|
4 | <id>{urlbase}{url}#branch-{node}</id> | |
|
5 | <updated>{date|rfc3339date}</updated> | |
|
6 | <published>{date|rfc3339date}</published> | |
|
7 | <content type="text"><![CDATA[{branch|strip|escape|addbreaks}]]></content> | |
|
8 | </entry> |
@@ -0,0 +1,11 b'' | |||
|
1 | {header} | |
|
2 | <id>{urlbase}{url}</id> | |
|
3 | <link rel="self" href="{urlbase}{url}atom-tags"/> | |
|
4 | <link rel="alternate" href="{urlbase}{url}tags"/> | |
|
5 | <title>{repo|escape}: branches</title> | |
|
6 | <summary>{repo|escape} branch history</summary> | |
|
7 | <author><name>Mercurial SCM</name></author> | |
|
8 | {latestentry%feedupdated} | |
|
9 | ||
|
10 | {entries%branchentry} | |
|
11 | </feed> |
@@ -0,0 +1,6 b'' | |||
|
1 | <item> | |
|
2 | <title>{branch|escape}</title> | |
|
3 | <link>{urlbase}{url}rev/{node|short}</link> | |
|
4 | <description><![CDATA[{branch|strip|escape|addbreaks}]]></description> | |
|
5 | <pubDate>{date|rfc822date}</pubDate> | |
|
6 | </item> |
@@ -0,0 +1,6 b'' | |||
|
1 | {header} | |
|
2 | <title>{repo|escape}: branches</title> | |
|
3 | <description>{repo|escape} branch history</description> | |
|
4 | {entries%branchentry} | |
|
5 | </channel> | |
|
6 | </rss> |
|
1 | NO CONTENT: new file 100644, binary diff hidden |
|
1 | NO CONTENT: new file 100644, binary diff hidden |
@@ -0,0 +1,106 b'' | |||
|
1 | from mercurial import ancestor | |
|
2 | ||
|
3 | # graph is a dict of child->parent adjacency lists for this graph: | |
|
4 | # o 13 | |
|
5 | # | | |
|
6 | # | o 12 | |
|
7 | # | | | |
|
8 | # | | o 11 | |
|
9 | # | | |\ | |
|
10 | # | | | | o 10 | |
|
11 | # | | | | | | |
|
12 | # | o---+ | 9 | |
|
13 | # | | | | | | |
|
14 | # o | | | | 8 | |
|
15 | # / / / / | |
|
16 | # | | o | 7 | |
|
17 | # | | | | | |
|
18 | # o---+ | 6 | |
|
19 | # / / / | |
|
20 | # | | o 5 | |
|
21 | # | |/ | |
|
22 | # | o 4 | |
|
23 | # | | | |
|
24 | # o | 3 | |
|
25 | # | | | |
|
26 | # | o 2 | |
|
27 | # |/ | |
|
28 | # o 1 | |
|
29 | # | | |
|
30 | # o 0 | |
|
31 | ||
|
32 | graph = {0: [-1], 1: [0], 2: [1], 3: [1], 4: [2], 5: [4], 6: [4], | |
|
33 | 7: [4], 8: [-1], 9: [6, 7], 10: [5], 11: [3, 7], 12: [9], | |
|
34 | 13: [8]} | |
|
35 | pfunc = graph.get | |
|
36 | ||
|
37 | class mockchangelog(object): | |
|
38 | parentrevs = graph.get | |
|
39 | ||
|
40 | def runmissingancestors(revs, bases): | |
|
41 | print "%% ancestors of %s and not of %s" % (revs, bases) | |
|
42 | print ancestor.missingancestors(revs, bases, pfunc) | |
|
43 | ||
|
44 | def test_missingancestors(): | |
|
45 | # Empty revs | |
|
46 | runmissingancestors([], [1]) | |
|
47 | runmissingancestors([], []) | |
|
48 | ||
|
49 | # If bases is empty, it's the same as if it were [nullrev] | |
|
50 | runmissingancestors([12], []) | |
|
51 | ||
|
52 | # Trivial case: revs == bases | |
|
53 | runmissingancestors([0], [0]) | |
|
54 | runmissingancestors([4, 5, 6], [6, 5, 4]) | |
|
55 | ||
|
56 | # With nullrev | |
|
57 | runmissingancestors([-1], [12]) | |
|
58 | runmissingancestors([12], [-1]) | |
|
59 | ||
|
60 | # 9 is a parent of 12. 7 is a parent of 9, so an ancestor of 12. 6 is an | |
|
61 | # ancestor of 12 but not of 7. | |
|
62 | runmissingancestors([12], [9]) | |
|
63 | runmissingancestors([9], [12]) | |
|
64 | runmissingancestors([12, 9], [7]) | |
|
65 | runmissingancestors([7, 6], [12]) | |
|
66 | ||
|
67 | # More complex cases | |
|
68 | runmissingancestors([10], [11, 12]) | |
|
69 | runmissingancestors([11], [10]) | |
|
70 | runmissingancestors([11], [10, 12]) | |
|
71 | runmissingancestors([12], [10]) | |
|
72 | runmissingancestors([12], [11]) | |
|
73 | runmissingancestors([10, 11, 12], [13]) | |
|
74 | runmissingancestors([13], [10, 11, 12]) | |
|
75 | ||
|
76 | def genlazyancestors(revs, stoprev=0, inclusive=False): | |
|
77 | print ("%% lazy ancestor set for %s, stoprev = %s, inclusive = %s" % | |
|
78 | (revs, stoprev, inclusive)) | |
|
79 | return ancestor.lazyancestors(mockchangelog, revs, stoprev=stoprev, | |
|
80 | inclusive=inclusive) | |
|
81 | ||
|
82 | def printlazyancestors(s, l): | |
|
83 | print [n for n in l if n in s] | |
|
84 | ||
|
85 | def test_lazyancestors(): | |
|
86 | # Empty revs | |
|
87 | s = genlazyancestors([]) | |
|
88 | printlazyancestors(s, [3, 0, -1]) | |
|
89 | ||
|
90 | # Standard example | |
|
91 | s = genlazyancestors([11, 13]) | |
|
92 | printlazyancestors(s, [11, 13, 7, 9, 8, 3, 6, 4, 1, -1, 0]) | |
|
93 | ||
|
94 | # Including revs | |
|
95 | s = genlazyancestors([11, 13], inclusive=True) | |
|
96 | printlazyancestors(s, [11, 13, 7, 9, 8, 3, 6, 4, 1, -1, 0]) | |
|
97 | ||
|
98 | # Test with stoprev | |
|
99 | s = genlazyancestors([11, 13], stoprev=6) | |
|
100 | printlazyancestors(s, [11, 13, 7, 9, 8, 3, 6, 4, 1, -1, 0]) | |
|
101 | s = genlazyancestors([11, 13], stoprev=6, inclusive=True) | |
|
102 | printlazyancestors(s, [11, 13, 7, 9, 8, 3, 6, 4, 1, -1, 0]) | |
|
103 | ||
|
104 | if __name__ == '__main__': | |
|
105 | test_missingancestors() | |
|
106 | test_lazyancestors() |
@@ -0,0 +1,46 b'' | |||
|
1 | % ancestors of [] and not of [1] | |
|
2 | [] | |
|
3 | % ancestors of [] and not of [] | |
|
4 | [] | |
|
5 | % ancestors of [12] and not of [] | |
|
6 | [0, 1, 2, 4, 6, 7, 9, 12] | |
|
7 | % ancestors of [0] and not of [0] | |
|
8 | [] | |
|
9 | % ancestors of [4, 5, 6] and not of [6, 5, 4] | |
|
10 | [] | |
|
11 | % ancestors of [-1] and not of [12] | |
|
12 | [] | |
|
13 | % ancestors of [12] and not of [-1] | |
|
14 | [0, 1, 2, 4, 6, 7, 9, 12] | |
|
15 | % ancestors of [12] and not of [9] | |
|
16 | [12] | |
|
17 | % ancestors of [9] and not of [12] | |
|
18 | [] | |
|
19 | % ancestors of [12, 9] and not of [7] | |
|
20 | [6, 9, 12] | |
|
21 | % ancestors of [7, 6] and not of [12] | |
|
22 | [] | |
|
23 | % ancestors of [10] and not of [11, 12] | |
|
24 | [5, 10] | |
|
25 | % ancestors of [11] and not of [10] | |
|
26 | [3, 7, 11] | |
|
27 | % ancestors of [11] and not of [10, 12] | |
|
28 | [3, 11] | |
|
29 | % ancestors of [12] and not of [10] | |
|
30 | [6, 7, 9, 12] | |
|
31 | % ancestors of [12] and not of [11] | |
|
32 | [6, 9, 12] | |
|
33 | % ancestors of [10, 11, 12] and not of [13] | |
|
34 | [0, 1, 2, 3, 4, 5, 6, 7, 9, 10, 11, 12] | |
|
35 | % ancestors of [13] and not of [10, 11, 12] | |
|
36 | [8, 13] | |
|
37 | % lazy ancestor set for [], stoprev = 0, inclusive = False | |
|
38 | [] | |
|
39 | % lazy ancestor set for [11, 13], stoprev = 0, inclusive = False | |
|
40 | [7, 8, 3, 4, 1, 0] | |
|
41 | % lazy ancestor set for [11, 13], stoprev = 0, inclusive = True | |
|
42 | [11, 13, 7, 8, 3, 4, 1, 0] | |
|
43 | % lazy ancestor set for [11, 13], stoprev = 6, inclusive = False | |
|
44 | [7, 8] | |
|
45 | % lazy ancestor set for [11, 13], stoprev = 6, inclusive = True | |
|
46 | [11, 13, 7, 8] |
|
1 | NO CONTENT: new file 100644 | |
The requested commit or file is too big and content was truncated. |
|
1 | NO CONTENT: new file 100644 | |
The requested commit or file is too big and content was truncated. |
@@ -11,6 +11,9 b' PURE=' | |||
|
11 | 11 | PYFILES:=$(shell find mercurial hgext doc -name '*.py') |
|
12 | 12 | DOCFILES=mercurial/help/*.txt |
|
13 | 13 | |
|
14 | # Set this to e.g. "mingw32" to use a non-default compiler. | |
|
15 | COMPILER= | |
|
16 | ||
|
14 | 17 | help: |
|
15 | 18 | @echo 'Commonly used make targets:' |
|
16 | 19 | @echo ' all - build program and documentation' |
@@ -33,11 +36,15 b' help:' | |||
|
33 | 36 | all: build doc |
|
34 | 37 | |
|
35 | 38 | local: |
|
36 | $(PYTHON) setup.py $(PURE) build_py -c -d . build_ext -i build_hgexe -i build_mo | |
|
37 | $(PYTHON) hg version | |
|
39 | $(PYTHON) setup.py $(PURE) \ | |
|
40 | build_py -c -d . \ | |
|
41 | build_ext $(COMPILER:%=-c %) -i \ | |
|
42 | build_hgexe $(COMPILER:%=-c %) -i \ | |
|
43 | build_mo | |
|
44 | env HGRCPATH= $(PYTHON) hg version | |
|
38 | 45 | |
|
39 | 46 | build: |
|
40 | $(PYTHON) setup.py $(PURE) build | |
|
47 | $(PYTHON) setup.py $(PURE) build $(COMPILER:%=-c %) | |
|
41 | 48 | |
|
42 | 49 | doc: |
|
43 | 50 | $(MAKE) -C doc |
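The $(COMPILER:%=-c %) substitution expands to nothing while COMPILER is left empty and to "-c <name>" once it is set, so a hypothetical MinGW build would be driven as:

    make local COMPILER=mingw32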
@@ -129,13 +129,14 b' pypats = [' | |||
|
129 | 129 | (r'(?<!def)\s+(cmp)\(', "cmp is not available in Python 3+"), |
|
130 | 130 | (r'\breduce\s*\(.*', "reduce is not available in Python 3+"), |
|
131 | 131 | (r'\.has_key\b', "dict.has_key is not available in Python 3+"), |
|
132 | (r'\s<>\s', '<> operator is not available in Python 3+, use !='), | |
|
132 | 133 | (r'^\s*\t', "don't use tabs"), |
|
133 | 134 | (r'\S;\s*\n', "semicolon"), |
|
134 | 135 | (r'[^_]_\("[^"]+"\s*%', "don't use % inside _()"), |
|
135 | 136 | (r"[^_]_\('[^']+'\s*%", "don't use % inside _()"), |
|
136 | (r'\w,\w', "missing whitespace after ,"), | |
|
137 | (r'\w[+/*\-<>]\w', "missing whitespace in expression"), | |
|
138 | (r'^\s+\w+=\w[^,()\n]*$', "missing whitespace in assignment"), | |
|
137 | (r'(\w|\)),\w', "missing whitespace after ,"), | |
|
138 | (r'(\w|\))[+/*\-<>]\w', "missing whitespace in expression"), | |
|
139 | (r'^\s+(\w|\.)+=\w[^,()\n]*$', "missing whitespace in assignment"), | |
|
139 | 140 | (r'(\s+)try:\n((?:\n|\1\s.*\n)+?)\1except.*?:\n' |
|
140 | 141 | r'((?:\n|\1\s.*\n)+?)\1finally:', 'no try/except/finally in Python 2.4'), |
|
141 | 142 | (r'(\s+)try:\n((?:\n|\1\s.*\n)*?)\1\s*yield\b.*?' |
@@ -185,6 +186,8 b' pypats = [' | |||
|
185 | 186 | (r'[^^+=*/!<>&| %-](\s=|=\s)[^= ]', |
|
186 | 187 | "wrong whitespace around ="), |
|
187 | 188 | (r'raise Exception', "don't raise generic exceptions"), |
|
189 | (r'raise [^,(]+, (\([^\)]+\)|[^,\(\)]+)$', | |
|
190 | "don't use old-style two-argument raise, use Exception(message)"), | |
|
188 | 191 | (r' is\s+(not\s+)?["\'0-9-]', "object comparison with literal"), |
|
189 | 192 | (r' [=!]=\s+(True|False|None)', |
|
190 | 193 | "comparison with singleton, use 'is' or 'is not' instead"), |
@@ -211,11 +214,11 b' pypats = [' | |||
|
211 | 214 | (r'\.strip\(\)\.split\(\)', "no need to strip before splitting"), |
|
212 | 215 | (r'^\s*except\s*:', "warning: naked except clause", r'#.*re-raises'), |
|
213 | 216 | (r':\n( )*( ){1,3}[^ ]', "must indent 4 spaces"), |
|
217 | (r'ui\.(status|progress|write|note|warn)\([\'\"]x', | |
|
218 | "missing _() in ui message (use () to hide false-positives)"), | |
|
214 | 219 | ], |
|
215 | 220 | # warnings |
|
216 | 221 | [ |
|
217 | (r'ui\.(status|progress|write|note|warn)\([\'\"]x', | |
|
218 | "warning: unwrapped ui message"), | |
|
219 | 222 | ] |
|
220 | 223 | ] |
|
221 | 224 |
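A hypothetical snippet showing what the new two-argument-raise pattern catches and the form the hint asks for:

    raise util.Abort, "no changes found"      # flagged by the new check
    raise util.Abort("no changes found")      # accepted form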
@@ -15,8 +15,43 b'' | |||
|
15 | 15 | # The whole snippet is activated only under windows, mouse wheel |
|
16 | 16 | # bindings working already under MacOSX and Linux. |
|
17 | 17 | |
|
18 | if {[catch {package require Ttk}]} { | |
|
19 | # use a shim | |
|
20 | namespace eval ttk { | |
|
21 | proc style args {} | |
|
22 | ||
|
23 | proc entry args { | |
|
24 | eval [linsert $args 0 ::entry] -relief flat | |
|
25 | } | |
|
26 | } | |
|
27 | ||
|
28 | interp alias {} ttk::button {} button | |
|
29 | interp alias {} ttk::frame {} frame | |
|
30 | interp alias {} ttk::label {} label | |
|
31 | interp alias {} ttk::scrollbar {} scrollbar | |
|
32 | interp alias {} ttk::optionMenu {} tk_optionMenu | |
|
33 | } else { | |
|
34 | proc ::ttk::optionMenu {w varName firstValue args} { | |
|
35 | upvar #0 $varName var | |
|
36 | ||
|
37 | if {![info exists var]} { | |
|
38 | set var $firstValue | |
|
39 | } | |
|
40 | ttk::menubutton $w -textvariable $varName -menu $w.menu \ | |
|
41 | -direction flush | |
|
42 | menu $w.menu -tearoff 0 | |
|
43 | $w.menu add radiobutton -label $firstValue -variable $varName | |
|
44 | foreach i $args { | |
|
45 | $w.menu add radiobutton -label $i -variable $varName | |
|
46 | } | |
|
47 | return $w.menu | |
|
48 | } | |
|
49 | } | |
|
50 | ||
|
18 | 51 | if {[tk windowingsystem] eq "win32"} { |
|
19 | 52 | |
|
53 | ttk::style theme use xpnative | |
|
54 | ||
|
20 | 55 | set mw_classes [list Text Listbox Table TreeCtrl] |
|
21 | 56 | foreach class $mw_classes { bind $class <MouseWheel> {} } |
|
22 | 57 | |
@@ -72,6 +107,12 b' proc ::tk::MouseWheel {wFired X Y D {shi' | |||
|
72 | 107 | bind all <MouseWheel> [list ::tk::MouseWheel %W %X %Y %D 0] |
|
73 | 108 | |
|
74 | 109 | # end of win32 section |
|
110 | } else { | |
|
111 | ||
|
112 | if {[ttk::style theme use] eq "default"} { | |
|
113 | ttk::style theme use clam | |
|
114 | } | |
|
115 | ||
|
75 | 116 | } |
|
76 | 117 | |
|
77 | 118 | |
@@ -480,7 +521,7 b' proc error_popup msg {' | |||
|
480 | 521 | wm transient $w . |
|
481 | 522 | message $w.m -text $msg -justify center -aspect 400 |
|
482 | 523 | pack $w.m -side top -fill x -padx 20 -pady 20 |
|
483 | button $w.ok -text OK -command "destroy $w" | |
|
524 | ttk::button $w.ok -text OK -command "destroy $w" | |
|
484 | 525 | pack $w.ok -side bottom -fill x |
|
485 | 526 | bind $w <Visibility> "grab $w; focus $w" |
|
486 | 527 | tkwait window $w |
@@ -526,11 +567,11 b' proc makewindow {} {' | |||
|
526 | 567 | set geometry(ctexth) [expr {($texth - 8) / |
|
527 | 568 | [font metrics $textfont -linespace]}] |
|
528 | 569 | } |
|
529 | frame .ctop.top | |
|
530 | frame .ctop.top.bar | |
|
570 | ttk::frame .ctop.top | |
|
571 | ttk::frame .ctop.top.bar | |
|
531 | 572 | pack .ctop.top.bar -side bottom -fill x |
|
532 | 573 | set cscroll .ctop.top.csb |
|
533 | scrollbar $cscroll -command {allcanvs yview} | |
|
574 | ttk::scrollbar $cscroll -command {allcanvs yview} | |
|
534 | 575 | pack $cscroll -side right -fill y |
|
535 | 576 | panedwindow .ctop.top.clist -orient horizontal -sashpad 0 -handlesize 4 |
|
536 | 577 | pack .ctop.top.clist -side top -fill both -expand 1 |
@@ -538,15 +579,15 b' proc makewindow {} {' | |||
|
538 | 579 | set canv .ctop.top.clist.canv |
|
539 | 580 | canvas $canv -height $geometry(canvh) -width $geometry(canv1) \ |
|
540 | 581 | -bg $bgcolor -bd 0 \ |
|
541 | -yscrollincr $linespc -yscrollcommand "$cscroll set" -selectbackground grey | |
|
582 | -yscrollincr $linespc -yscrollcommand "$cscroll set" -selectbackground "#c0c0c0" | |
|
542 | 583 | .ctop.top.clist add $canv |
|
543 | 584 | set canv2 .ctop.top.clist.canv2 |
|
544 | 585 | canvas $canv2 -height $geometry(canvh) -width $geometry(canv2) \ |
|
545 | -bg $bgcolor -bd 0 -yscrollincr $linespc -selectbackground grey | |
|
586 | -bg $bgcolor -bd 0 -yscrollincr $linespc -selectbackground "#c0c0c0" | |
|
546 | 587 | .ctop.top.clist add $canv2 |
|
547 | 588 | set canv3 .ctop.top.clist.canv3 |
|
548 | 589 | canvas $canv3 -height $geometry(canvh) -width $geometry(canv3) \ |
|
549 | -bg $bgcolor -bd 0 -yscrollincr $linespc -selectbackground grey | |
|
590 | -bg $bgcolor -bd 0 -yscrollincr $linespc -selectbackground "#c0c0c0" | |
|
550 | 591 | .ctop.top.clist add $canv3 |
|
551 | 592 | bind .ctop.top.clist <Configure> {resizeclistpanes %W %w} |
|
552 | 593 | |
@@ -557,7 +598,7 b' proc makewindow {} {' | |||
|
557 | 598 | -command gotocommit -width 8 |
|
558 | 599 | $sha1but conf -disabledforeground [$sha1but cget -foreground] |
|
559 | 600 | pack .ctop.top.bar.sha1label -side left |
|
560 | entry $sha1entry -width 40 -font $textfont -textvariable sha1string | |
|
601 | ttk::entry $sha1entry -width 40 -font $textfont -textvariable sha1string | |
|
561 | 602 | trace add variable sha1string write sha1change |
|
562 | 603 | pack $sha1entry -side left -pady 2 |
|
563 | 604 | |
@@ -577,25 +618,25 b' proc makewindow {} {' | |||
|
577 | 618 | 0x00, 0x38, 0xff, 0x7f, 0xff, 0x7f, 0xff, 0x7f, 0x00, 0x38, 0x00, 0x1c, |
|
578 | 619 | 0x00, 0x0e, 0x00, 0x07, 0x80, 0x03, 0xc0, 0x01}; |
|
579 | 620 | } |
|
580 | button .ctop.top.bar.leftbut -image bm-left -command goback \ | |
|
621 | ttk::button .ctop.top.bar.leftbut -image bm-left -command goback \ | |
|
581 | 622 | -state disabled -width 26 |
|
582 | 623 | pack .ctop.top.bar.leftbut -side left -fill y |
|
583 | button .ctop.top.bar.rightbut -image bm-right -command goforw \ | |
|
624 | ttk::button .ctop.top.bar.rightbut -image bm-right -command goforw \ | |
|
584 | 625 | -state disabled -width 26 |
|
585 | 626 | pack .ctop.top.bar.rightbut -side left -fill y |
|
586 | 627 | |
|
587 | button .ctop.top.bar.findbut -text "Find" -command dofind | |
|
628 | ttk::button .ctop.top.bar.findbut -text "Find" -command dofind | |
|
588 | 629 | pack .ctop.top.bar.findbut -side left |
|
589 | 630 | set findstring {} |
|
590 | 631 | set fstring .ctop.top.bar.findstring |
|
591 | 632 | lappend entries $fstring |
|
592 | entry $fstring -width 30 -font $textfont -textvariable findstring | |
|
633 | ttk::entry $fstring -width 30 -font $textfont -textvariable findstring | |
|
593 | 634 | pack $fstring -side left -expand 1 -fill x |
|
594 | 635 | set findtype Exact |
|
595 | set findtypemenu [tk_optionMenu .ctop.top.bar.findtype \ | |
|
636 | set findtypemenu [ttk::optionMenu .ctop.top.bar.findtype \ | |
|
596 | 637 | findtype Exact IgnCase Regexp] |
|
597 | 638 | set findloc "All fields" |
|
598 | tk_optionMenu .ctop.top.bar.findloc findloc "All fields" Headline \ | |
|
639 | ttk::optionMenu .ctop.top.bar.findloc findloc "All fields" Headline \ | |
|
599 | 640 | Comments Author Committer Files Pickaxe |
|
600 | 641 | pack .ctop.top.bar.findloc -side right |
|
601 | 642 | pack .ctop.top.bar.findtype -side right |
@@ -604,14 +645,14 b' proc makewindow {} {' | |||
|
604 | 645 | |
|
605 | 646 | panedwindow .ctop.cdet -orient horizontal |
|
606 | 647 | .ctop add .ctop.cdet |
|
607 | frame .ctop.cdet.left | |
|
648 | ttk::frame .ctop.cdet.left | |
|
608 | 649 | set ctext .ctop.cdet.left.ctext |
|
609 | 650 | text $ctext -fg $fgcolor -bg $bgcolor -state disabled -font $textfont \ |
|
610 | 651 | -width $geometry(ctextw) -height $geometry(ctexth) \ |
|
611 | 652 | -yscrollcommand ".ctop.cdet.left.sb set" \ |
|
612 | 653 | -xscrollcommand ".ctop.cdet.left.hb set" -wrap none |
|
613 | scrollbar .ctop.cdet.left.sb -command "$ctext yview" | |
|
614 | scrollbar .ctop.cdet.left.hb -orient horizontal -command "$ctext xview" | |
|
654 | ttk::scrollbar .ctop.cdet.left.sb -command "$ctext yview" | |
|
655 | ttk::scrollbar .ctop.cdet.left.hb -orient horizontal -command "$ctext xview" | |
|
615 | 656 | pack .ctop.cdet.left.sb -side right -fill y |
|
616 | 657 | pack .ctop.cdet.left.hb -side bottom -fill x |
|
617 | 658 | pack $ctext -side left -fill both -expand 1 |
@@ -643,12 +684,12 b' proc makewindow {} {' | |||
|
643 | 684 | $ctext tag conf found -back yellow |
|
644 | 685 | } |
|
645 | 686 | |
|
646 | frame .ctop.cdet.right | |
|
687 | ttk::frame .ctop.cdet.right | |
|
647 | 688 | set cflist .ctop.cdet.right.cfiles |
|
648 | 689 | listbox $cflist -fg $fgcolor -bg $bgcolor \ |
|
649 | 690 | -selectmode extended -width $geometry(cflistw) \ |
|
650 | 691 | -yscrollcommand ".ctop.cdet.right.sb set" |
|
651 | scrollbar .ctop.cdet.right.sb -command "$cflist yview" | |
|
692 | ttk::scrollbar .ctop.cdet.right.sb -command "$cflist yview" | |
|
652 | 693 | pack .ctop.cdet.right.sb -side right -fill y |
|
653 | 694 | pack $cflist -side left -fill both -expand 1 |
|
654 | 695 | .ctop.cdet add .ctop.cdet.right |
@@ -901,7 +942,7 b' Copyright \xa9 2005 Paul Mackerras' | |||
|
901 | 942 | Use and redistribute under the terms of the GNU General Public License} \ |
|
902 | 943 | -justify center -aspect 400 |
|
903 | 944 | pack $w.m -side top -fill x -padx 20 -pady 20 |
|
904 | button $w.ok -text Close -command "destroy $w" | |
|
945 | ttk::button $w.ok -text Close -command "destroy $w" | |
|
905 | 946 | pack $w.ok -side bottom |
|
906 | 947 | } |
|
907 | 948 | |
@@ -1219,7 +1260,7 b' proc drawtags {id x xt y1} {' | |||
|
1219 | 1260 | } else { |
|
1220 | 1261 | # draw a head or other ref |
|
1221 | 1262 | if {[incr nheads -1] >= 0} { |
|
1222 | set col green | |
|
1263 | set col "#00ff00" | |
|
1223 | 1264 | } else { |
|
1224 | 1265 | set col "#ddddff" |
|
1225 | 1266 | } |
@@ -2417,8 +2458,7 b' proc selectline {l isnew} {' | |||
|
2417 | 2458 | set currentid $id |
|
2418 | 2459 | $sha1entry delete 0 end |
|
2419 | 2460 | $sha1entry insert 0 $id |
|
2420 | $sha1entry selection from 0 | |
|
2421 | $sha1entry selection to end | |
|
2461 | $sha1entry selection range 0 end | |
|
2422 | 2462 | |
|
2423 | 2463 | $ctext conf -state normal |
|
2424 | 2464 | $ctext delete 0.0 end |
@@ -3675,36 +3715,36 b' proc mkpatch {} {' | |||
|
3675 | 3715 | set patchtop $top |
|
3676 | 3716 | catch {destroy $top} |
|
3677 | 3717 | toplevel $top |
|
3678 | label $top.title -text "Generate patch" | |
|
3718 | ttk::label $top.title -text "Generate patch" | |
|
3679 | 3719 | grid $top.title - -pady 10 |
|
3680 | label $top.from -text "From:" | |
|
3681 | entry $top.fromsha1 -width 40 | |
|
3720 | ttk::label $top.from -text "From:" | |
|
3721 | ttk::entry $top.fromsha1 -width 40 | |
|
3682 | 3722 | $top.fromsha1 insert 0 $oldid |
|
3683 | 3723 | $top.fromsha1 conf -state readonly |
|
3684 | 3724 | grid $top.from $top.fromsha1 -sticky w |
|
3685 | entry $top.fromhead -width 60 | |
|
3725 | ttk::entry $top.fromhead -width 60 | |
|
3686 | 3726 | $top.fromhead insert 0 $oldhead |
|
3687 | 3727 | $top.fromhead conf -state readonly |
|
3688 | 3728 | grid x $top.fromhead -sticky w |
|
3689 | label $top.to -text "To:" | |
|
3690 | entry $top.tosha1 -width 40 | |
|
3729 | ttk::label $top.to -text "To:" | |
|
3730 | ttk::entry $top.tosha1 -width 40 | |
|
3691 | 3731 | $top.tosha1 insert 0 $newid |
|
3692 | 3732 | $top.tosha1 conf -state readonly |
|
3693 | 3733 | grid $top.to $top.tosha1 -sticky w |
|
3694 | entry $top.tohead -width 60 | |
|
3734 | ttk::entry $top.tohead -width 60 | |
|
3695 | 3735 | $top.tohead insert 0 $newhead |
|
3696 | 3736 | $top.tohead conf -state readonly |
|
3697 | 3737 | grid x $top.tohead -sticky w |
|
3698 | button $top.rev -text "Reverse" -command mkpatchrev | |
|
3738 | ttk::button $top.rev -text "Reverse" -command mkpatchrev | |
|
3699 | 3739 | grid $top.rev x -pady 10 |
|
3700 | label $top.flab -text "Output file:" | |
|
3701 | entry $top.fname -width 60 | |
|
3740 | ttk::label $top.flab -text "Output file:" | |
|
3741 | ttk::entry $top.fname -width 60 | |
|
3702 | 3742 | $top.fname insert 0 [file normalize "patch$patchnum.patch"] |
|
3703 | 3743 | incr patchnum |
|
3704 | 3744 | grid $top.flab $top.fname -sticky w |
|
3705 | frame $top.buts | |
|
3706 | button $top.buts.gen -text "Generate" -command mkpatchgo | |
|
3707 | button $top.buts.can -text "Cancel" -command mkpatchcan | |
|
3745 | ttk::frame $top.buts | |
|
3746 | ttk::button $top.buts.gen -text "Generate" -command mkpatchgo | |
|
3747 | ttk::button $top.buts.can -text "Cancel" -command mkpatchcan | |
|
3708 | 3748 | grid $top.buts.gen $top.buts.can |
|
3709 | 3749 | grid columnconfigure $top.buts 0 -weight 1 -uniform a |
|
3710 | 3750 | grid columnconfigure $top.buts 1 -weight 1 -uniform a |
@@ -3755,23 +3795,23 b' proc mktag {} {' | |||
|
3755 | 3795 | set mktagtop $top |
|
3756 | 3796 | catch {destroy $top} |
|
3757 | 3797 | toplevel $top |
|
3758 | label $top.title -text "Create tag" | |
|
3798 | ttk::label $top.title -text "Create tag" | |
|
3759 | 3799 | grid $top.title - -pady 10 |
|
3760 | label $top.id -text "ID:" | |
|
3761 | entry $top.sha1 -width 40 | |
|
3800 | ttk::label $top.id -text "ID:" | |
|
3801 | ttk::entry $top.sha1 -width 40 | |
|
3762 | 3802 | $top.sha1 insert 0 $rowmenuid |
|
3763 | 3803 | $top.sha1 conf -state readonly |
|
3764 | 3804 | grid $top.id $top.sha1 -sticky w |
|
3765 | entry $top.head -width 60 | |
|
3805 | ttk::entry $top.head -width 60 | |
|
3766 | 3806 | $top.head insert 0 [lindex $commitinfo($rowmenuid) 0] |
|
3767 | 3807 | $top.head conf -state readonly |
|
3768 | 3808 | grid x $top.head -sticky w |
|
3769 | label $top.tlab -text "Tag name:" | |
|
3770 | entry $top.tag -width 60 | |
|
3809 | ttk::label $top.tlab -text "Tag name:" | |
|
3810 | ttk::entry $top.tag -width 60 | |
|
3771 | 3811 | grid $top.tlab $top.tag -sticky w |
|
3772 | frame $top.buts | |
|
3773 | button $top.buts.gen -text "Create" -command mktaggo | |
|
3774 | button $top.buts.can -text "Cancel" -command mktagcan | |
|
3812 | ttk::frame $top.buts | |
|
3813 | ttk::button $top.buts.gen -text "Create" -command mktaggo | |
|
3814 | ttk::button $top.buts.can -text "Cancel" -command mktagcan | |
|
3775 | 3815 | grid $top.buts.gen $top.buts.can |
|
3776 | 3816 | grid columnconfigure $top.buts 0 -weight 1 -uniform a |
|
3777 | 3817 | grid columnconfigure $top.buts 1 -weight 1 -uniform a |
@@ -3835,27 +3875,27 b' proc writecommit {} {' | |||
|
3835 | 3875 | set wrcomtop $top |
|
3836 | 3876 | catch {destroy $top} |
|
3837 | 3877 | toplevel $top |
|
3838 | label $top.title -text "Write commit to file" | |
|
3878 | ttk::label $top.title -text "Write commit to file" | |
|
3839 | 3879 | grid $top.title - -pady 10 |
|
3840 | label $top.id -text "ID:" | |
|
3841 | entry $top.sha1 -width 40 | |
|
3880 | ttk::label $top.id -text "ID:" | |
|
3881 | ttk::entry $top.sha1 -width 40 | |
|
3842 | 3882 | $top.sha1 insert 0 $rowmenuid |
|
3843 | 3883 | $top.sha1 conf -state readonly |
|
3844 | 3884 | grid $top.id $top.sha1 -sticky w |
|
3845 | entry $top.head -width 60 | |
|
3885 | ttk::entry $top.head -width 60 | |
|
3846 | 3886 | $top.head insert 0 [lindex $commitinfo($rowmenuid) 0] |
|
3847 | 3887 | $top.head conf -state readonly |
|
3848 | 3888 | grid x $top.head -sticky w |
|
3849 | label $top.clab -text "Command:" | |
|
3850 | entry $top.cmd -width 60 -textvariable wrcomcmd | |
|
3889 | ttk::label $top.clab -text "Command:" | |
|
3890 | ttk::entry $top.cmd -width 60 -textvariable wrcomcmd | |
|
3851 | 3891 | grid $top.clab $top.cmd -sticky w -pady 10 |
|
3852 | label $top.flab -text "Output file:" | |
|
3853 | entry $top.fname -width 60 | |
|
3892 | ttk::label $top.flab -text "Output file:" | |
|
3893 | ttk::entry $top.fname -width 60 | |
|
3854 | 3894 | $top.fname insert 0 [file normalize "commit-[string range $rowmenuid 0 6]"] |
|
3855 | 3895 | grid $top.flab $top.fname -sticky w |
|
3856 | frame $top.buts | |
|
3857 | button $top.buts.gen -text "Write" -command wrcomgo | |
|
3858 | button $top.buts.can -text "Cancel" -command wrcomcan | |
|
3896 | ttk::frame $top.buts | |
|
3897 | ttk::button $top.buts.gen -text "Write" -command wrcomgo | |
|
3898 | ttk::button $top.buts.can -text "Cancel" -command wrcomcan | |
|
3859 | 3899 | grid $top.buts.gen $top.buts.can |
|
3860 | 3900 | grid columnconfigure $top.buts 0 -weight 1 -uniform a |
|
3861 | 3901 | grid columnconfigure $top.buts 1 -weight 1 -uniform a |
@@ -19,7 +19,7 b' vimdiff.args=$local $other $base' | |||
|
19 | 19 | vimdiff.check=changed |
|
20 | 20 | vimdiff.priority=-10 |
|
21 | 21 | |
|
22 | merge.checkconflicts=True | |
|
22 | merge.check=conflicts | |
|
23 | 23 | merge.priority=-100 |
|
24 | 24 | |
|
25 | 25 | gpyfm.gui=True |
@@ -43,7 +43,7 b' diffmerge.regkeyalt=Software\\Wow6432Node' | |||
|
43 | 43 | diffmerge.regname=Location |
|
44 | 44 | diffmerge.priority=-7 |
|
45 | 45 | diffmerge.args=-nosplash -merge -title1=local -title2=merged -title3=other $local $base $other -result=$output |
|
46 | diffmerge.checkchanged=True | |
|
46 | diffmerge.check=changed | |
|
47 | 47 | diffmerge.gui=True |
|
48 | 48 | diffmerge.diffargs=--nosplash --title1='$plabel1' --title2='$clabel' $parent $child |
|
49 | 49 | |
@@ -59,7 +59,7 b' p4merge.diffargs=$parent $child' | |||
|
59 | 59 | tortoisemerge.args=/base:$base /mine:$local /theirs:$other /merged:$output |
|
60 | 60 | tortoisemerge.regkey=Software\TortoiseSVN |
|
61 | 61 | tortoisemerge.regkeyalt=Software\Wow6432Node\TortoiseSVN |
|
62 | tortoisemerge.checkchanged=True | |
|
62 | tortoisemerge.check=changed | |
|
63 | 63 | tortoisemerge.gui=True |
|
64 | 64 | tortoisemerge.priority=-8 |
|
65 | 65 | tortoisemerge.diffargs=/base:$parent /mine:$child /basename:'$plabel1' /minename:'$clabel' |
@@ -93,7 +93,7 b' winmerge.args=/e /x /wl /ub /dl other /d' | |||
|
93 | 93 | winmerge.regkey=Software\Thingamahoochie\WinMerge |
|
94 | 94 | winmerge.regkeyalt=Software\Wow6432Node\Thingamahoochie\WinMerge\ |
|
95 | 95 | winmerge.regname=Executable |
|
96 | winmerge.checkchanged=True | |
|
96 | winmerge.check=changed | |
|
97 | 97 | winmerge.gui=True |
|
98 | 98 | winmerge.priority=-10 |
|
99 | 99 | winmerge.diffargs=/r /e /x /ub /wl /dl '$plabel1' /dr '$clabel' $parent $child |
@@ -119,6 +119,5 b' UltraCompare.args = $base $local $other ' | |||
|
119 | 119 | UltraCompare.priority = -2 |
|
120 | 120 | UltraCompare.gui = True |
|
121 | 121 | UltraCompare.binary = True |
|
122 | UltraCompare.checkconflicts = True | |
|
123 | UltraCompare.checkchanged = True | |
|
122 | UltraCompare.check = conflicts,changed | |
|
124 | 123 | UltraCompare.diffargs=$child $parent -title1 $clabel -title2 $plabel1 |
@@ -1,9 +1,13 b'' | |||
|
1 | 1 | # perf.py - performance test routines |
|
2 | 2 | '''helper extension to measure performance''' |
|
3 | 3 | |
|
4 | from mercurial import cmdutil, scmutil, util, match, commands | |
|
4 | from mercurial import cmdutil, scmutil, util, match, commands, obsolete | |
|
5 | from mercurial import repoview, branchmap | |
|
5 | 6 | import time, os, sys |
|
6 | 7 | |
|
8 | cmdtable = {} | |
|
9 | command = cmdutil.command(cmdtable) | |
|
10 | ||
|
7 | 11 | def timer(func, title=None): |
|
8 | 12 | results = [] |
|
9 | 13 | begin = time.time() |
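cmdutil.command(cmdtable) returns a decorator that registers each function, together with its option table and synopsis, into cmdtable; this is what lets the hand-maintained dict at the bottom of this patch be deleted. A hypothetical new benchmark would now be declared like this:

    @command('perfexample', [('n', 'num', 1000, 'number of loop iterations')])
    def perfexample(ui, repo, **opts):
        # hypothetical benchmark reusing the timer() helper above
        timer(lambda: sum(xrange(opts['num'])))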
@@ -29,6 +33,7 b' def timer(func, title=None):' | |||
|
29 | 33 | sys.stderr.write("! wall %f comb %f user %f sys %f (best of %d)\n" |
|
30 | 34 | % (m[0], m[1] + m[2], m[1], m[2], count)) |
|
31 | 35 | |
|
36 | @command('perfwalk') | |
|
32 | 37 | def perfwalk(ui, repo, *pats): |
|
33 | 38 | try: |
|
34 | 39 | m = scmutil.match(repo[None], pats, {}) |
@@ -40,11 +45,14 b' def perfwalk(ui, repo, *pats):' | |||
|
40 | 45 | except Exception: |
|
41 | 46 | timer(lambda: len(list(cmdutil.walk(repo, pats, {})))) |
|
42 | 47 | |
|
43 | def perfstatus(ui, repo, *pats): | |
|
48 | @command('perfstatus', | |
|
49 | [('u', 'unknown', False, | |
|
50 | 'ask status to look for unknown files')]) | |
|
51 | def perfstatus(ui, repo, **opts): | |
|
44 | 52 | #m = match.always(repo.root, repo.getcwd()) |
|
45 | 53 | #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False, |
|
46 | 54 | # False)))) |
|
47 | timer(lambda: sum(map(len, repo.status()))) | |
|
55 | timer(lambda: sum(map(len, repo.status(**opts)))) | |
|
48 | 56 | |
|
49 | 57 | def clearcaches(cl): |
|
50 | 58 | # behave somewhat consistently across internal API changes |
@@ -55,6 +63,7 b' def clearcaches(cl):' | |||
|
55 | 63 | cl._nodecache = {nullid: nullrev} |
|
56 | 64 | cl._nodepos = None |
|
57 | 65 | |
|
66 | @command('perfheads') | |
|
58 | 67 | def perfheads(ui, repo): |
|
59 | 68 | cl = repo.changelog |
|
60 | 69 | def d(): |
@@ -62,6 +71,7 b' def perfheads(ui, repo):' | |||
|
62 | 71 | clearcaches(cl) |
|
63 | 72 | timer(d) |
|
64 | 73 | |
|
74 | @command('perftags') | |
|
65 | 75 | def perftags(ui, repo): |
|
66 | 76 | import mercurial.changelog, mercurial.manifest |
|
67 | 77 | def t(): |
@@ -71,6 +81,7 b' def perftags(ui, repo):' | |||
|
71 | 81 | return len(repo.tags()) |
|
72 | 82 | timer(t) |
|
73 | 83 | |
|
84 | @command('perfancestors') | |
|
74 | 85 | def perfancestors(ui, repo): |
|
75 | 86 | heads = repo.changelog.headrevs() |
|
76 | 87 | def d(): |
@@ -78,6 +89,17 b' def perfancestors(ui, repo):' | |||
|
78 | 89 | pass |
|
79 | 90 | timer(d) |
|
80 | 91 | |
|
92 | @command('perfancestorset') | |
|
93 | def perfancestorset(ui, repo, revset): | |
|
94 | revs = repo.revs(revset) | |
|
95 | heads = repo.changelog.headrevs() | |
|
96 | def d(): | |
|
97 | s = repo.changelog.ancestors(heads) | |
|
98 | for rev in revs: | |
|
99 | rev in s | |
|
100 | timer(d) | |
|
101 | ||
|
102 | @command('perfdirstate') | |
|
81 | 103 | def perfdirstate(ui, repo): |
|
82 | 104 | "a" in repo.dirstate |
|
83 | 105 | def d(): |
@@ -85,6 +107,7 b' def perfdirstate(ui, repo):' | |||
|
85 | 107 | "a" in repo.dirstate |
|
86 | 108 | timer(d) |
|
87 | 109 | |
|
110 | @command('perfdirstatedirs') | |
|
88 | 111 | def perfdirstatedirs(ui, repo): |
|
89 | 112 | "a" in repo.dirstate |
|
90 | 113 | def d(): |
@@ -92,6 +115,7 b' def perfdirstatedirs(ui, repo):' | |||
|
92 | 115 | del repo.dirstate._dirs |
|
93 | 116 | timer(d) |
|
94 | 117 | |
|
118 | @command('perfdirstatewrite') | |
|
95 | 119 | def perfdirstatewrite(ui, repo): |
|
96 | 120 | ds = repo.dirstate |
|
97 | 121 | "a" in ds |
@@ -100,6 +124,7 b' def perfdirstatewrite(ui, repo):' | |||
|
100 | 124 | ds.write() |
|
101 | 125 | timer(d) |
|
102 | 126 | |
|
127 | @command('perfmanifest') | |
|
103 | 128 | def perfmanifest(ui, repo): |
|
104 | 129 | def d(): |
|
105 | 130 | t = repo.manifest.tip() |
@@ -108,6 +133,7 b' def perfmanifest(ui, repo):' | |||
|
108 | 133 | repo.manifest._cache = None |
|
109 | 134 | timer(d) |
|
110 | 135 | |
|
136 | @command('perfchangeset') | |
|
111 | 137 | def perfchangeset(ui, repo, rev): |
|
112 | 138 | n = repo[rev].node() |
|
113 | 139 | def d(): |
@@ -115,6 +141,7 b' def perfchangeset(ui, repo, rev):' | |||
|
115 | 141 | #repo.changelog._cache = None |
|
116 | 142 | timer(d) |
|
117 | 143 | |
|
144 | @command('perfindex') | |
|
118 | 145 | def perfindex(ui, repo): |
|
119 | 146 | import mercurial.revlog |
|
120 | 147 | mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg |
@@ -124,12 +151,14 b' def perfindex(ui, repo):' | |||
|
124 | 151 | cl.rev(n) |
|
125 | 152 | timer(d) |
|
126 | 153 | |
|
154 | @command('perfstartup') | |
|
127 | 155 | def perfstartup(ui, repo): |
|
128 | 156 | cmd = sys.argv[0] |
|
129 | 157 | def d(): |
|
130 | 158 | os.system("HGRCPATH= %s version -q > /dev/null" % cmd) |
|
131 | 159 | timer(d) |
|
132 | 160 | |
|
161 | @command('perfparents') | |
|
133 | 162 | def perfparents(ui, repo): |
|
134 | 163 | nl = [repo.changelog.node(i) for i in xrange(1000)] |
|
135 | 164 | def d(): |
@@ -137,22 +166,16 b' def perfparents(ui, repo):' | |||
|
137 | 166 | repo.changelog.parents(n) |
|
138 | 167 | timer(d) |
|
139 | 168 | |
|
169 | @command('perflookup') | |
|
140 | 170 | def perflookup(ui, repo, rev): |
|
141 | 171 | timer(lambda: len(repo.lookup(rev))) |
|
142 | 172 | |
|
173 | @command('perfrevrange') | |
|
143 | 174 | def perfrevrange(ui, repo, *specs): |
|
144 | 175 | revrange = scmutil.revrange |
|
145 | 176 | timer(lambda: len(revrange(repo, specs))) |
|
146 | 177 | |
|
147 | def perfnodelookup(ui, repo, rev): | |
|
148 | import mercurial.revlog | |
|
149 | mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg | |
|
150 | n = repo[rev].node() | |
|
151 | def d(): | |
|
152 | cl = mercurial.revlog.revlog(repo.sopener, "00changelog.i") | |
|
153 | cl.rev(n) | |
|
154 | timer(d) | |
|
155 | ||
|
178 | @command('perfnodelookup') | |
|
156 | 179 | def perfnodelookup(ui, repo, rev): |
|
157 | 180 | import mercurial.revlog |
|
158 | 181 | mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg |
@@ -163,12 +186,15 b' def perfnodelookup(ui, repo, rev):' | |||
|
163 | 186 | clearcaches(cl) |
|
164 | 187 | timer(d) |
|
165 | 188 | |
|
189 | @command('perflog', | |
|
190 | [('', 'rename', False, 'ask log to follow renames')]) | |
|
166 | 191 | def perflog(ui, repo, **opts): |
|
167 | 192 | ui.pushbuffer() |
|
168 | 193 | timer(lambda: commands.log(ui, repo, rev=[], date='', user='', |
|
169 | 194 | copies=opts.get('rename'))) |
|
170 | 195 | ui.popbuffer() |
|
171 | 196 | |
|
197 | @command('perftemplating') | |
|
172 | 198 | def perftemplating(ui, repo): |
|
173 | 199 | ui.pushbuffer() |
|
174 | 200 | timer(lambda: commands.log(ui, repo, rev=[], date='', user='', |
@@ -176,15 +202,18 b' def perftemplating(ui, repo):' | |||
|
176 | 202 | ' {author|person}: {desc|firstline}\n')) |
|
177 | 203 | ui.popbuffer() |
|
178 | 204 | |
|
205 | @command('perfcca') | |
|
179 | 206 | def perfcca(ui, repo): |
|
180 | 207 | timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate)) |
|
181 | 208 | |
|
209 | @command('perffncacheload') | |
|
182 | 210 | def perffncacheload(ui, repo): |
|
183 | 211 | s = repo.store |
|
184 | 212 | def d(): |
|
185 | 213 | s.fncache._load() |
|
186 | 214 | timer(d) |
|
187 | 215 | |
|
216 | @command('perffncachewrite') | |
|
188 | 217 | def perffncachewrite(ui, repo): |
|
189 | 218 | s = repo.store |
|
190 | 219 | s.fncache._load() |
@@ -193,6 +222,7 b' def perffncachewrite(ui, repo):' | |||
|
193 | 222 | s.fncache.write() |
|
194 | 223 | timer(d) |
|
195 | 224 | |
|
225 | @command('perffncacheencode') | |
|
196 | 226 | def perffncacheencode(ui, repo): |
|
197 | 227 | s = repo.store |
|
198 | 228 | s.fncache._load() |
@@ -201,6 +231,7 b' def perffncacheencode(ui, repo):' | |||
|
201 | 231 | s.encode(p) |
|
202 | 232 | timer(d) |
|
203 | 233 | |
|
234 | @command('perfdiffwd') | |
|
204 | 235 | def perfdiffwd(ui, repo): |
|
205 | 236 | """Profile diff of working directory changes""" |
|
206 | 237 | options = { |
@@ -218,6 +249,9 b' def perfdiffwd(ui, repo):' | |||
|
218 | 249 | title = 'diffopts: %s' % (diffopt and ('-' + diffopt) or 'none') |
|
219 | 250 | timer(d, title) |
|
220 | 251 | |
|
252 | @command('perfrevlog', | |
|
253 | [('d', 'dist', 100, 'distance between the revisions')], | |
|
254 | "[INDEXFILE]") | |
|
221 | 255 | def perfrevlog(ui, repo, file_, **opts): |
|
222 | 256 | from mercurial import revlog |
|
223 | 257 | dist = opts['dist'] |
@@ -228,32 +262,105 b' def perfrevlog(ui, repo, file_, **opts):' | |||
|
228 | 262 | |
|
229 | 263 | timer(d) |
|
230 | 264 | |
|
231 | cmdtable = { | |
|
232 | 'perfcca': (perfcca, []), | |
|
233 | 'perffncacheload': (perffncacheload, []), | |
|
234 | 'perffncachewrite': (perffncachewrite, []), | |
|
235 | 'perffncacheencode': (perffncacheencode, []), | |
|
236 | 'perflookup': (perflookup, []), | |
|
237 | 'perfrevrange': (perfrevrange, []), | |
|
238 | 'perfnodelookup': (perfnodelookup, []), | |
|
239 | 'perfparents': (perfparents, []), | |
|
240 | 'perfstartup': (perfstartup, []), | |
|
241 | 'perfstatus': (perfstatus, []), | |
|
242 | 'perfwalk': (perfwalk, []), | |
|
243 | 'perfmanifest': (perfmanifest, []), | |
|
244 | 'perfchangeset': (perfchangeset, []), | |
|
245 | 'perfindex': (perfindex, []), | |
|
246 | 'perfheads': (perfheads, []), | |
|
247 | 'perftags': (perftags, []), | |
|
248 | 'perfancestors': (perfancestors, []), | |
|
249 | 'perfdirstate': (perfdirstate, []), | |
|
250 | 'perfdirstatedirs': (perfdirstate, []), | |
|
251 | 'perfdirstatewrite': (perfdirstatewrite, []), | |
|
252 | 'perflog': (perflog, | |
|
253 | [('', 'rename', False, 'ask log to follow renames')]), | |
|
254 | 'perftemplating': (perftemplating, []), | |
|
255 | 'perfdiffwd': (perfdiffwd, []), | |
|
256 | 'perfrevlog': (perfrevlog, | |
|
257 | [('d', 'dist', 100, 'distance between the revisions')], | |
|
258 | "[INDEXFILE]"), | |
|
259 | } | |
|
265 | @command('perfrevset', | |
|
266 | [('C', 'clear', False, 'clear volatile cache between each call.')], | |
|
267 | "REVSET") | |
|
268 | def perfrevset(ui, repo, expr, clear=False): | |
|
269 | """benchmark the execution time of a revset | |
|
270 | ||
|
271 | Use the --clear option if you need to evaluate the impact of building the | |
|
272 | volatile revision set caches on revset execution. The volatile caches | |
|
273 | hold data about filtered and obsolete revisions.""" | |
|
274 | def d(): | |
|
275 | if clear: | |
|
276 | repo.invalidatevolatilesets() | |
|
277 | repo.revs(expr) | |
|
278 | timer(d) | |
|
279 | ||
|
280 | @command('perfvolatilesets') | |
|
281 | def perfvolatilesets(ui, repo, *names): | |
|
282 | """benchmark the computation of various volatile set | |
|
283 | ||
|
284 | Volatile set computes element related to filtering and obsolescence.""" | |
|
285 | repo = repo.unfiltered() | |
|
286 | ||
|
287 | def getobs(name): | |
|
288 | def d(): | |
|
289 | repo.invalidatevolatilesets() | |
|
290 | obsolete.getrevs(repo, name) | |
|
291 | return d | |
|
292 | ||
|
293 | allobs = sorted(obsolete.cachefuncs) | |
|
294 | if names: | |
|
295 | allobs = [n for n in allobs if n in names] | |
|
296 | ||
|
297 | for name in allobs: | |
|
298 | timer(getobs(name), title=name) | |
|
299 | ||
|
300 | def getfiltered(name): | |
|
301 | def d(): | |
|
302 | repo.invalidatevolatilesets() | |
|
303 | repoview.filteredrevs(repo, name) | |
|
304 | return d | |
|
305 | ||
|
306 | allfilter = sorted(repoview.filtertable) | |
|
307 | if names: | |
|
308 | allfilter = [n for n in allfilter if n in names] | |
|
309 | ||
|
310 | for name in allfilter: | |
|
311 | timer(getfiltered(name), title=name) | |
|
312 | ||
|
313 | @command('perfbranchmap', | |
|
314 | [('f', 'full', False, | |
|
315 | 'include build time of subsets'), | |
|
316 | ]) | |
|
317 | def perfbranchmap(ui, repo, full=False): | |
|
318 | """benchmark the update of a branchmap | |
|
319 | ||
|
320 | This benchmarks the full repo.branchmap() call with read and write disabled | |
|
321 | """ | |
|
322 | def getbranchmap(filtername): | |
|
323 | """generate a benchmark function for the filtername""" | |
|
324 | if filtername is None: | |
|
325 | view = repo | |
|
326 | else: | |
|
327 | view = repo.filtered(filtername) | |
|
328 | def d(): | |
|
329 | if full: | |
|
330 | view._branchcaches.clear() | |
|
331 | else: | |
|
332 | view._branchcaches.pop(filtername, None) | |
|
333 | view.branchmap() | |
|
334 | return d | |
|
335 | # add filter in smaller subset to bigger subset | |
|
336 | possiblefilters = set(repoview.filtertable) | |
|
337 | allfilters = [] | |
|
338 | while possiblefilters: | |
|
339 | for name in possiblefilters: | |
|
340 | subset = repoview.subsettable.get(name) | |
|
341 | if subset not in possiblefilters: | |
|
342 | break | |
|
343 | else: | |
|
344 | assert False, 'subset cycle %s!' % possiblefilters | |
|
345 | allfilters.append(name) | |
|
346 | possiblefilters.remove(name) | |
|
347 | ||
|
348 | # warm the cache | |
|
349 | if not full: | |
|
350 | for name in allfilters: | |
|
351 | repo.filtered(name).branchmap() | |
|
352 | # add unfiltered | |
|
353 | allfilters.append(None) | |
|
354 | oldread = branchmap.read | |
|
355 | oldwrite = branchmap.branchcache.write | |
|
356 | try: | |
|
357 | branchmap.read = lambda repo: None | |
|
358 | branchmap.branchcache.write = lambda self, repo: None | |
|
359 | for name in allfilters: | |
|
360 | timer(getbranchmap(name), title=str(name)) | |
|
361 | finally: | |
|
362 | branchmap.read = oldread | |
|
363 | branchmap.branchcache.write = oldwrite | |
|
364 | ||
|
365 | ||
|
366 |
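
The while loop in perfbranchmap above is a small topological sort: each filter is benchmarked only after the smaller subset it is built from, matching the "smaller subset to bigger subset" comment. A minimal standalone sketch of the same loop; the subsettable contents here are illustrative stand-ins, not the real repoview tables:

    # illustrative filter -> subset mapping (real names live in repoview)
    subsettable = {'visible': 'served', 'served': 'immutable',
                   'immutable': 'base'}

    possiblefilters = set(subsettable) | {'base'}
    allfilters = []
    while possiblefilters:
        for name in possiblefilters:
            # pick any filter whose subset is no longer pending
            if subsettable.get(name) not in possiblefilters:
                break
        else:
            raise AssertionError('subset cycle %s!' % possiblefilters)
        allfilters.append(name)
        possiblefilters.remove(name)

    print(allfilters)  # smallest subsets first, ending with 'visible'
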
@@ -231,6 +231,8 b' def synthesize(ui, repo, descpath, **opt' | |||
|
231 | 231 | fp.close() |
|
232 | 232 | |
|
233 | 233 | def cdf(l): |
|
234 | if not l: | |
|
235 | return [], [] | |
|
234 | 236 | vals, probs = zip(*sorted(l, key=lambda x: x[1], reverse=True)) |
|
235 | 237 | t = float(sum(probs, 0)) |
|
236 | 238 | s, cdfs = 0, [] |
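
The new guard matters because unpacking zip(*sorted([])) raises a ValueError, so an empty input needs an early return. Assuming the rest of cdf (not shown in this hunk) accumulates normalized cumulative weights, the complete helper would look roughly like:

    def cdf(l):
        # cumulative distribution over (value, weight) pairs, heaviest first
        if not l:
            return [], []
        vals, probs = zip(*sorted(l, key=lambda x: x[1], reverse=True))
        t = float(sum(probs, 0))
        s, cdfs = 0, []
        for p in probs:           # assumed tail of the real function
            s += p
            cdfs.append(s / t)
        return vals, cdfs

    print(cdf([('add', 3), ('remove', 1)]))  # (('add', 'remove'), [0.75, 1.0])
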
@@ -2,7 +2,8 b'' | |||
|
2 | 2 | " Language: Mercurial unified tests |
|
3 | 3 | " Author: Steve Losh (steve@stevelosh.com) |
|
4 | 4 | " |
|
5 | " Add the following line to your ~/.vimrc to enable: | |
|
5 | " Place this file in ~/.vim/syntax/ and add the following line to your | |
|
6 | " ~/.vimrc to enable: | |
|
6 | 7 | " au BufNewFile,BufRead *.t set filetype=hgtest |
|
7 | 8 | " |
|
8 | 9 | " If you want folding you'll need the following line as well: |
@@ -174,11 +174,10 b' typeset -A _hg_cmd_globals' | |||
|
174 | 174 | |
|
175 | 175 | _hg_cmd tags | while read tag |
|
176 | 176 | do |
|
177 | tags+=(${tag/ #	[0-9]#:*}) | |
|
|
177 | tags+=(${tag/ #[0-9]#:*}) | |
|
178 | 178 | done |
|
179 | (( $#tags )) && _describe -t tags 'tags' tags | |
|
179 | (( $#tags )) && _describe -t tags 'tags' tags | |
|
180 | 180 | } |
|
181 | ||
|
182 | 181 | _hg_bookmarks() { |
|
183 | 182 | typeset -a bookmark bookmarks |
|
184 | 183 | |
@@ -198,7 +197,7 b' typeset -A _hg_cmd_globals' | |||
|
198 | 197 | |
|
199 | 198 | _hg_cmd branches | while read branch |
|
200 | 199 | do |
|
201 | branches+=(${branch/ #	[0-9]#:*}) | |
|
|
200 | branches+=(${branch/ #[0-9]#:*}) | |
|
202 | 201 | done |
|
203 | 202 | (( $#branches )) && _describe -t branches 'branches' branches |
|
204 | 203 | } |
@@ -208,12 +207,19 b' typeset -A _hg_cmd_globals' | |||
|
208 | 207 | typeset -a heads |
|
209 | 208 | local myrev |
|
210 | 209 | |
|
211 | heads=(${(f)"$(_hg_cmd heads --template '{rev}\\n')"}) | |
|
210 | heads=(${(f)"$(_hg_cmd heads --template '{rev}:{branch}\\n')"}) | |
|
212 | 211 | # exclude own revision |
|
213 | myrev=$(_hg_cmd log -r . --template '{rev}\\n') | |
|
212 | myrev=$(_hg_cmd log -r . --template '{rev}:{branch}\\n') | |
|
214 | 213 | heads=(${heads:#$myrev}) |
|
215 | 214 | |
|
216 | 215 | (( $#heads )) && _describe -t heads 'heads' heads |
|
216 | ||
|
217 | branches=(${(f)"$(_hg_cmd heads --template '{branch}\\n')"}) | |
|
218 | # exclude own revision | |
|
219 | myrev=$(_hg_cmd log -r . --template '{branch}\\n') | |
|
220 | branches=(${branches:#$myrev}) | |
|
221 | ||
|
222 | (( $#branches )) && _describe -t branches 'branches' branches | |
|
217 | 223 | } |
|
218 | 224 | |
|
219 | 225 | _hg_files() { |
@@ -146,7 +146,7 b' class Table(object):' | |||
|
146 | 146 | text.extend(cell) |
|
147 | 147 | if not text[-1].endswith('\n'): |
|
148 | 148 | text[-1] += '\n' |
|
149 | if i < len(row)-1: | |
|
149 | if i < len(row) - 1: | |
|
150 | 150 | text.append('T}'+self._tab_char+'T{\n') |
|
151 | 151 | else: |
|
152 | 152 | text.append('T}\n') |
@@ -258,7 +258,7 b' class Translator(nodes.NodeVisitor):' | |||
|
258 | 258 | # ensure we get a ".TH" as viewers require it. |
|
259 | 259 | self.head.append(self.header()) |
|
260 | 260 | # filter body |
|
261 | for i in xrange(len(self.body)-1, 0, -1): | |
|
261 | for i in xrange(len(self.body) - 1, 0, -1): | |
|
262 | 262 | # remove superfluous vertical gaps. |
|
263 | 263 | if self.body[i] == '.sp\n': |
|
264 | 264 | if self.body[i - 1][:4] in ('.BI ','.IP '): |
@@ -880,7 +880,7 b' class Translator(nodes.NodeVisitor):' | |||
|
880 | 880 | self.context[-3] = '.BI' # bold/italic alternate |
|
881 | 881 | if node['delimiter'] != ' ': |
|
882 | 882 | self.body.append('\\fB%s ' % node['delimiter']) |
|
883 | elif self.body[len(self.body)-1].endswith('='): | |
|
883 | elif self.body[len(self.body) - 1].endswith('='): | |
|
884 | 884 | # a blank only means no blank in output, just changing font |
|
885 | 885 | self.body.append(' ') |
|
886 | 886 | else: |
@@ -144,8 +144,10 b' def churn(ui, repo, *pats, **opts):' | |||
|
144 | 144 | if not rate: |
|
145 | 145 | return |
|
146 | 146 | |
|
147 | sortkey = ((not opts.get('sort')) and (lambda x: -sum(x[1])) or None) | |
|
148 | rate.sort(key=sortkey) | |
|
|
147 | if opts.get('sort'): | |
|
148 | rate.sort() | |
|
149 | else: | |
|
150 | rate.sort(key=lambda x: (-sum(x[1]), x)) | |
|
149 | 151 | |
|
150 | 152 | # Be careful not to have a zero maxcount (issue833) |
|
151 | 153 | maxcount = float(max(sum(v) for k, v in rate)) or 1.0 |
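
The explicit key also documents the ordering: by default, busiest entries first, with the (name, counts) pair itself breaking ties deterministically. For example:

    rate = [('bob', (2, 0)), ('ann', (3, 0)), ('cat', (3, 0))]
    rate.sort(key=lambda x: (-sum(x[1]), x))
    print(rate)  # [('ann', (3, 0)), ('cat', (3, 0)), ('bob', (2, 0))]
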
@@ -103,6 +103,7 b' disable color.' | |||
|
103 | 103 | import os |
|
104 | 104 | |
|
105 | 105 | from mercurial import commands, dispatch, extensions, ui as uimod, util |
|
106 | from mercurial import error, templater | |
|
106 | 107 | from mercurial.i18n import _ |
|
107 | 108 | |
|
108 | 109 | testedwith = 'internal' |
@@ -354,6 +355,28 b' class colorui(uimod.ui):' | |||
|
354 | 355 | for s in msg.split('\n')]) |
|
355 | 356 | return msg |
|
356 | 357 | |
|
358 | def templatelabel(context, mapping, args): | |
|
359 | if len(args) != 2: | |
|
360 | # i18n: "label" is a keyword | |
|
361 | raise error.ParseError(_("label expects two arguments")) | |
|
362 | ||
|
363 | thing = templater.stringify(args[1][0](context, mapping, args[1][1])) | |
|
364 | thing = templater.runtemplate(context, mapping, | |
|
365 | templater.compiletemplate(thing, context)) | |
|
366 | ||
|
367 | # apparently, repo could be a string that is the favicon? | |
|
368 | repo = mapping.get('repo', '') | |
|
369 | if isinstance(repo, str): | |
|
370 | return thing | |
|
371 | ||
|
372 | label = templater.stringify(args[0][0](context, mapping, args[0][1])) | |
|
373 | label = templater.runtemplate(context, mapping, | |
|
374 | templater.compiletemplate(label, context)) | |
|
375 | ||
|
376 | thing = templater.stringify(thing) | |
|
377 | label = templater.stringify(label) | |
|
378 | ||
|
379 | return repo.ui.label(thing, label) | |
|
357 | 380 | |
|
358 | 381 | def uisetup(ui): |
|
359 | 382 | global _terminfo_params |
@@ -370,6 +393,7 b' def uisetup(ui):' | |||
|
370 | 393 | configstyles(ui_) |
|
371 | 394 | return orig(ui_, opts, cmd, cmdfunc) |
|
372 | 395 | extensions.wrapfunction(dispatch, '_runcommand', colorcmd) |
|
396 | templater.funcs['label'] = templatelabel | |
|
373 | 397 | |
|
374 | 398 | def extsetup(ui): |
|
375 | 399 | commands.globalopts.append( |
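
The args convention used by templatelabel is worth spelling out: every argument arrives as a (function, data) pair and is evaluated lazily by calling func(context, mapping, data). A standalone model of that convention (the shout function and literal helper are hypothetical, not part of the patch):

    def literal(context, mapping, data):
        return data                      # a pre-parsed literal argument

    def shout(context, mapping, args):
        # evaluate the single lazy argument, then transform it
        thing = args[0][0](context, mapping, args[0][1])
        return str(thing).upper()

    print(shout(None, {}, [(literal, 'default')]))  # DEFAULT
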
@@ -191,6 +191,10 b' def convert(ui, src, dest=None, revmapfi' | |||
|
191 | 191 | branch indicated in the regex as the second parent of the |
|
192 | 192 | changeset. Default is ``{{mergefrombranch ([-\\w]+)}}`` |
|
193 | 193 | |
|
194 | :convert.localtimezone: use local time (as determined by the TZ | |
|
195 | environment variable) for changeset date/times. The default | |
|
196 | is False (use UTC). | |
|
197 | ||
|
194 | 198 | :hooks.cvslog: Specify a Python function to be called at the end of |
|
195 | 199 | gathering the CVS log. The function is passed a list with the |
|
196 | 200 | log entries, and can modify the entries in-place, or add or |
@@ -231,6 +235,10 b' def convert(ui, src, dest=None, revmapfi' | |||
|
231 | 235 | :convert.svn.trunk: specify the name of the trunk branch. The |
|
232 | 236 | default is ``trunk``. |
|
233 | 237 | |
|
238 | :convert.localtimezone: use local time (as determined by the TZ | |
|
239 | environment variable) for changeset date/times. The default | |
|
240 | is False (use UTC). | |
|
241 | ||
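
Assuming standard hgrc syntax, enabling the option documented above for a conversion looks like:

    [convert]
    localtimezone = True
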
|
234 | 242 | Source history can be retrieved starting at a specific revision, |
|
235 | 243 | instead of being integrally converted. Only single branch |
|
236 | 244 | conversions are supported. |
@@ -5,7 +5,7 b'' | |||
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | import base64, errno, subprocess, os | |
|
8 | import base64, errno, subprocess, os, datetime | |
|
9 | 9 | import cPickle as pickle |
|
10 | 10 | from mercurial import util |
|
11 | 11 | from mercurial.i18n import _ |
@@ -446,3 +446,10 b' def parsesplicemap(path):' | |||
|
446 | 446 | if e.errno != errno.ENOENT: |
|
447 | 447 | raise |
|
448 | 448 | return m |
|
449 | ||
|
450 | def makedatetimestamp(t): | |
|
451 | """Like util.makedate() but for time t instead of current time""" | |
|
452 | delta = (datetime.datetime.utcfromtimestamp(t) - | |
|
453 | datetime.datetime.fromtimestamp(t)) | |
|
454 | tz = delta.days * 86400 + delta.seconds | |
|
455 | return t, tz |
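
A quick sanity check of the offset arithmetic (illustrative only; time.tzset is POSIX-specific): for a host at UTC+01:00, utcfromtimestamp() reads one hour behind fromtimestamp(), so the computed offset is -3600 seconds, matching Mercurial's seconds-west-of-UTC convention.

    import datetime, os, time

    os.environ['TZ'] = 'Europe/Paris'   # assumed zone; winter = UTC+1
    time.tzset()                        # POSIX only
    t = 1356994800                      # 2013-01-01 00:00:00 UTC
    delta = (datetime.datetime.utcfromtimestamp(t) -
             datetime.datetime.fromtimestamp(t))
    print(delta.days * 86400 + delta.seconds)  # -3600
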
@@ -147,7 +147,7 b' class converter(object):' | |||
|
147 | 147 | map contains valid revision identifiers and merge the new |
|
148 | 148 | links in the source graph. |
|
149 | 149 | """ |
|
150 | for c in splicemap: | |
|
150 | for c in sorted(splicemap): | |
|
151 | 151 | if c not in parents: |
|
152 | 152 | if not self.dest.hascommit(self.map.get(c, c)): |
|
153 | 153 | # Could be in source but not converted during this run |
@@ -175,7 +175,7 b' class converter(object):' | |||
|
175 | 175 | revisions without parents. 'parents' must be a mapping of revision |
|
176 | 176 | identifier to its parents ones. |
|
177 | 177 | """ |
|
178 | visit = parents.keys() | |
|
|
178 | visit = sorted(parents) | |
|
179 | 179 | seen = set() |
|
180 | 180 | children = {} |
|
181 | 181 | roots = [] |
@@ -11,6 +11,7 b' from mercurial import encoding, util' | |||
|
11 | 11 | from mercurial.i18n import _ |
|
12 | 12 | |
|
13 | 13 | from common import NoRepo, commit, converter_source, checktool |
|
14 | from common import makedatetimestamp | |
|
14 | 15 | import cvsps |
|
15 | 16 | |
|
16 | 17 | class convert_cvs(converter_source): |
@@ -70,6 +71,8 b' class convert_cvs(converter_source):' | |||
|
70 | 71 | cs.author = self.recode(cs.author) |
|
71 | 72 | self.lastbranch[cs.branch] = id |
|
72 | 73 | cs.comment = self.recode(cs.comment) |
|
74 | if self.ui.configbool('convert', 'localtimezone'): | |
|
75 | cs.date = makedatetimestamp(cs.date[0]) | |
|
73 | 76 | date = util.datestr(cs.date, '%Y-%m-%d %H:%M:%S %1%2') |
|
74 | 77 | self.tags.update(dict.fromkeys(cs.tags, id)) |
|
75 | 78 |
@@ -19,6 +19,7 b' class logentry(object):' | |||
|
19 | 19 | .branch - name of branch this revision is on |
|
20 | 20 | .branches - revision tuple of branches starting at this revision |
|
21 | 21 | .comment - commit message |
|
22 | .commitid - CVS commitid or None | |
|
22 | 23 | .date - the commit date as a (time, tz) tuple |
|
23 | 24 | .dead - true if file revision is dead |
|
24 | 25 | .file - Name of file |
@@ -28,19 +29,17 b' class logentry(object):' | |||
|
28 | 29 | .revision - revision number as tuple |
|
29 | 30 | .tags - list of tags on the file |
|
30 | 31 | .synthetic - is this a synthetic "file ... added on ..." revision? |
|
31 | .mergepoint- the branch that has been merged from | |
|
32 | (if present in rlog output) | |
|
|
|
33 | .branchpoints- the branches that start at the current entry | |
|
32 | .mergepoint - the branch that has been merged from (if present in | |
|
33 | rlog output) or None | |
|
34 | .branchpoints - the branches that start at the current entry or empty | |
|
34 | 35 | ''' |
|
35 | 36 | def __init__(self, **entries): |
|
36 | 37 | self.synthetic = False |
|
37 | 38 | self.__dict__.update(entries) |
|
38 | 39 | |
|
39 | 40 | def __repr__(self): |
|
40 | return "<%s at 0x%x: %s %s>" % (self.__class__.__name__, | |
|
41 | id(self), | |
|
42 | self.file, | |
|
43 | ".".join(map(str, self.revision))) | |
|
41 | items = ("%s=%r"%(k, self.__dict__[k]) for k in sorted(self.__dict__)) | |
|
42 | return "%s(%s)"%(type(self).__name__, ", ".join(items)) | |
|
44 | 43 | |
|
45 | 44 | class logerror(Exception): |
|
46 | 45 | pass |
@@ -113,6 +112,7 b' def createlog(ui, directory=None, root="' | |||
|
113 | 112 | re_50 = re.compile('revision ([\\d.]+)(\s+locked by:\s+.+;)?$') |
|
114 | 113 | re_60 = re.compile(r'date:\s+(.+);\s+author:\s+(.+);\s+state:\s+(.+?);' |
|
115 | 114 | r'(\s+lines:\s+(\+\d+)?\s+(-\d+)?;)?' |
|
115 | r'(\s+commitid:\s+([^;]+);)?' | |
|
116 | 116 | r'(.*mergepoint:\s+([^;]+);)?') |
|
117 | 117 | re_70 = re.compile('branches: (.+);$') |
|
118 | 118 | |
@@ -171,6 +171,14 b' def createlog(ui, directory=None, root="' | |||
|
171 | 171 | try: |
|
172 | 172 | ui.note(_('reading cvs log cache %s\n') % cachefile) |
|
173 | 173 | oldlog = pickle.load(open(cachefile)) |
|
174 | for e in oldlog: | |
|
175 | if not (util.safehasattr(e, 'branchpoints') and | |
|
176 | util.safehasattr(e, 'commitid') and | |
|
177 | util.safehasattr(e, 'mergepoint')): | |
|
178 | ui.status(_('ignoring old cache\n')) | |
|
179 | oldlog = [] | |
|
180 | break | |
|
181 | ||
|
174 | 182 | ui.note(_('cache has %d log entries\n') % len(oldlog)) |
|
175 | 183 | except Exception, e: |
|
176 | 184 | ui.note(_('error reading cache: %r\n') % e) |
@@ -296,9 +304,16 b' def createlog(ui, directory=None, root="' | |||
|
296 | 304 | # as this state is re-entered for subsequent revisions of a file. |
|
297 | 305 | match = re_50.match(line) |
|
298 | 306 | assert match, _('expected revision number') |
|
299 | e = logentry(rcs=scache(rcs), file=scache(filename), | |
|
|
300 | revision=tuple([int(x) for x in match.group(1).split('.')]), | |
|
301 | branches=[], parent=None) | |
|
307 | e = logentry(rcs=scache(rcs), | |
|
308 | file=scache(filename), | |
|
309 | revision=tuple([int(x) for x in | |
|
310 | match.group(1).split('.')]), | |
|
311 | branches=[], | |
|
312 | parent=None, | |
|
313 | commitid=None, | |
|
314 | mergepoint=None, | |
|
315 | branchpoints=set()) | |
|
316 | ||
|
302 | 317 | state = 6 |
|
303 | 318 | |
|
304 | 319 | elif state == 6: |
@@ -329,8 +344,11 b' def createlog(ui, directory=None, root="' | |||
|
329 | 344 | else: |
|
330 | 345 | e.lines = None |
|
331 | 346 | |
|
332 | if match.group(7): # cvsnt mergepoint | |
|
|
333 | myrev = match.group(8).split('.') | |
|
|
|
347 | if match.group(7): # cvs 1.12 commitid | |
|
348 | e.commitid = match.group(8) | |
|
349 | ||
|
350 | if match.group(9): # cvsnt mergepoint | |
|
351 | myrev = match.group(10).split('.') | |
|
334 | 352 | if len(myrev) == 2: # head |
|
335 | 353 | e.mergepoint = 'HEAD' |
|
336 | 354 | else: |
@@ -339,8 +357,7 b' def createlog(ui, directory=None, root="' | |||
|
339 | 357 | assert len(branches) == 1, ('unknown branch: %s' |
|
340 | 358 | % e.mergepoint) |
|
341 | 359 | e.mergepoint = branches[0] |
|
342 | else: | |
|
343 | e.mergepoint = None | |
|
360 | ||
|
344 | 361 | e.comment = [] |
|
345 | 362 | state = 7 |
|
346 | 363 | |
@@ -469,23 +486,22 b' class changeset(object):' | |||
|
469 | 486 | .author - author name as CVS knows it |
|
470 | 487 | .branch - name of branch this changeset is on, or None |
|
471 | 488 | .comment - commit message |
|
489 | .commitid - CVS commitid or None | |
|
472 | 490 | .date - the commit date as a (time,tz) tuple |
|
473 | 491 | .entries - list of logentry objects in this changeset |
|
474 | 492 | .parents - list of one or two parent changesets |
|
475 | 493 | .tags - list of tags on this changeset |
|
476 | 494 | .synthetic - from synthetic revision "file ... added on branch ..." |
|
477 | .mergepoint- the branch that has been merged from | |
|
478 | (if present in rlog output) | |
|
479 | .branchpoints- the branches that start at the current entry | |
|
495 | .mergepoint- the branch that has been merged from or None | |
|
496 | .branchpoints- the branches that start at the current entry or empty | |
|
480 | 497 | ''' |
|
481 | 498 | def __init__(self, **entries): |
|
482 | 499 | self.synthetic = False |
|
483 | 500 | self.__dict__.update(entries) |
|
484 | 501 | |
|
485 | 502 | def __repr__(self): |
|
486 | return "<%s at 0x%x: %s>" % (self.__class__.__name__, | |
|
487 | id(self), | |
|
488 | getattr(self, 'id', "(no id)")) | |
|
503 | items = ("%s=%r"%(k, self.__dict__[k]) for k in sorted(self.__dict__)) | |
|
504 | return "%s(%s)"%(type(self).__name__, ", ".join(items)) | |
|
489 | 505 | |
|
490 | 506 | def createchangeset(ui, log, fuzz=60, mergefrom=None, mergeto=None): |
|
491 | 507 | '''Convert log into changesets.''' |
@@ -493,8 +509,8 b' def createchangeset(ui, log, fuzz=60, me' | |||
|
493 | 509 | ui.status(_('creating changesets\n')) |
|
494 | 510 | |
|
495 | 511 | # Merge changesets |
|
496 | ||
|
497 | log.sort(key=lambda x: (x.comment, x.author, x.branch, x.date)) | |
|
512 | log.sort(key=lambda x: (x.commitid, x.comment, x.author, x.branch, x.date, | |
|
513 | x.branchpoints)) | |
|
498 | 514 | |
|
499 | 515 | changesets = [] |
|
500 | 516 | files = set() |
@@ -517,22 +533,24 b' def createchangeset(ui, log, fuzz=60, me' | |||
|
517 | 533 | # first changeset and bar the next and MYBRANCH and MYBRANCH2 |
|
518 | 534 | # should both start off of the bar changeset. No provisions are |
|
519 | 535 | # made to ensure that this is, in fact, what happens. |
|
520 | if not (c and | |
|
521 | e.comment == c.comment and | |
|
|
|
522 | e.author == c.author and | |
|
523 | e.branch == c.branch and | |
|
524 | (not util.safehasattr(e, 'branchpoints') or | |
|
525 | not util.safehasattr (c, 'branchpoints') or | |
|
526 | e.branchpoints == c.branchpoints) and | |
|
|
|
527 | ((c.date[0] + c.date[1]) <= | |
|
528 | (e.date[0] + e.date[1]) <= | |
|
|
529 | (c.date[0] + c.date[1]) + fuzz) and | |
|
|
530 | e.file not in files): | |
|
536 | if not (c and e.branchpoints == c.branchpoints and | |
|
537 | (# cvs commitids | |
|
538 | (e.commitid is not None and e.commitid == c.commitid) or | |
|
539 | (# no commitids, use fuzzy commit detection | |
|
540 | (e.commitid is None or c.commitid is None) and | |
|
541 | e.comment == c.comment and | |
|
542 | e.author == c.author and | |
|
543 | e.branch == c.branch and | |
|
544 | ((c.date[0] + c.date[1]) <= | |
|
545 | (e.date[0] + e.date[1]) <= | |
|
546 | (c.date[0] + c.date[1]) + fuzz) and | |
|
547 | e.file not in files))): | |
|
531 | 548 | c = changeset(comment=e.comment, author=e.author, |
|
532 | branch=e.branch, date=e.date, entries=[], | |
|
|
533 | mergepoint=getattr(e, 'mergepoint', None), | |
|
|
534 | branchpoints=getattr(e, 'branchpoints', set())) | |
|
|
549 | branch=e.branch, date=e.date, | |
|
550 | entries=[], mergepoint=e.mergepoint, | |
|
551 | branchpoints=e.branchpoints, commitid=e.commitid) | |
|
535 | 552 | changesets.append(c) |
|
553 | ||
|
536 | 554 | files = set() |
|
537 | 555 | if len(changesets) % 100 == 0: |
|
538 | 556 | t = '%d %s' % (len(changesets), repr(e.comment)[1:-1]) |
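
Restated outside the large boolean expression, the new grouping rule reads roughly as follows; the samechangeset helper is a sketch for exposition only, though the attribute names come from the patch:

    def samechangeset(e, c, files, fuzz=60):
        # would log entry e join the changeset being accumulated in c?
        if e.branchpoints != c.branchpoints:
            return False
        if e.commitid is not None and e.commitid == c.commitid:
            return True       # modern CVS: matching commitids decide
        return ((e.commitid is None or c.commitid is None) and
                e.comment == c.comment and
                e.author == c.author and
                e.branch == c.branch and
                (c.date[0] + c.date[1]) <=
                (e.date[0] + e.date[1]) <=
                (c.date[0] + c.date[1]) + fuzz and
                e.file not in files)
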
@@ -801,22 +819,22 b' def debugcvsps(ui, *args, **opts):' | |||
|
801 | 819 | # Note: trailing spaces on several lines here are needed to have |
|
802 | 820 | # bug-for-bug compatibility with cvsps. |
|
803 | 821 | ui.write('---------------------\n') |
|
804 | ui.write('PatchSet %d \n' % cs.id) | |
|
805 | ui.write('Date: %s\n' % util.datestr(cs.date, | |
|
806 | '%Y/%m/%d %H:%M:%S %1%2')) | |
|
807 | ui.write('Author: %s\n' % cs.author) | |
|
808 | ui.write('Branch: %s\n' % (cs.branch or 'HEAD')) | |
|
809 | ui.write('Tag%s: %s \n' % (['', 's'][len(cs.tags) > 1], | |
|
810 | ','.join(cs.tags) or '(none)')) | |
|
811 | branchpoints = getattr(cs, 'branchpoints', None) | |
|
812 | if branchpoints: | |
|
|
|
813 | ui.write('Branchpoints: %s \n' % ', '.join(sorted(branchpoints))) | |
|
|
|
822 | ui.write(('PatchSet %d \n' % cs.id)) | |
|
823 | ui.write(('Date: %s\n' % util.datestr(cs.date, | |
|
824 | '%Y/%m/%d %H:%M:%S %1%2'))) | |
|
825 | ui.write(('Author: %s\n' % cs.author)) | |
|
826 | ui.write(('Branch: %s\n' % (cs.branch or 'HEAD'))) | |
|
827 | ui.write(('Tag%s: %s \n' % (['', 's'][len(cs.tags) > 1], | |
|
828 | ','.join(cs.tags) or '(none)'))) | |
|
829 | if cs.branchpoints: | |
|
830 | ui.write(('Branchpoints: %s \n') % | |
|
831 | ', '.join(sorted(cs.branchpoints))) | |
|
814 | 832 | if opts["parents"] and cs.parents: |
|
815 | 833 | if len(cs.parents) > 1: |
|
816 | ui.write('Parents: %s\n' % | |
|
817 | (','.join([str(p.id) for p in cs.parents]))) | |
|
834 | ui.write(('Parents: %s\n' % | |
|
835 | (','.join([str(p.id) for p in cs.parents])))) | |
|
818 | 836 | else: |
|
819 | ui.write('Parent: %d\n' % cs.parents[0].id) | |
|
837 | ui.write(('Parent: %d\n' % cs.parents[0].id)) | |
|
820 | 838 | |
|
821 | 839 | if opts["ancestors"]: |
|
822 | 840 | b = cs.branch |
@@ -825,11 +843,11 b' def debugcvsps(ui, *args, **opts):' | |||
|
825 | 843 | b, c = ancestors[b] |
|
826 | 844 | r.append('%s:%d:%d' % (b or "HEAD", c, branches[b])) |
|
827 | 845 | if r: |
|
828 | ui.write('Ancestors: %s\n' % (','.join(r))) | |
|
846 | ui.write(('Ancestors: %s\n' % (','.join(r)))) | |
|
829 | 847 | |
|
830 | ui.write('Log:\n') | |
|
848 | ui.write(('Log:\n')) | |
|
831 | 849 | ui.write('%s\n\n' % cs.comment) |
|
832 | ui.write('Members: \n') | |
|
850 | ui.write(('Members: \n')) | |
|
833 | 851 | for f in cs.entries: |
|
834 | 852 | fn = f.file |
|
835 | 853 | if fn.startswith(opts["prefix"]): |
@@ -6,12 +6,24 b'' | |||
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | import os |
|
9 | from mercurial import util | |
|
9 | from mercurial import util, config | |
|
10 | 10 | from mercurial.node import hex, nullid |
|
11 | 11 | from mercurial.i18n import _ |
|
12 | 12 | |
|
13 | 13 | from common import NoRepo, commit, converter_source, checktool |
|
14 | 14 | |
|
15 | class submodule(object): | |
|
16 | def __init__(self, path, node, url): | |
|
17 | self.path = path | |
|
18 | self.node = node | |
|
19 | self.url = url | |
|
20 | ||
|
21 | def hgsub(self): | |
|
22 | return "%s = [git]%s" % (self.path, self.url) | |
|
23 | ||
|
24 | def hgsubstate(self): | |
|
25 | return "%s %s" % (self.node, self.path) | |
|
26 | ||
|
15 | 27 | class convert_git(converter_source): |
|
16 | 28 | # Windows does not support GIT_DIR= construct while other systems |
|
17 | 29 | # cannot remove environment variable. Just assume none have |
@@ -55,6 +67,7 b' class convert_git(converter_source):' | |||
|
55 | 67 | checktool('git', 'git') |
|
56 | 68 | |
|
57 | 69 | self.path = path |
|
70 | self.submodules = [] | |
|
58 | 71 | |
|
59 | 72 | def getheads(self): |
|
60 | 73 | if not self.rev: |
@@ -76,16 +89,57 b' class convert_git(converter_source):' | |||
|
76 | 89 | return data |
|
77 | 90 | |
|
78 | 91 | def getfile(self, name, rev): |
|
79 | data = self.catfile(rev, "blob") | |
|
80 | mode = self.modecache[(name, rev)] | |
|
92 | if name == '.hgsub': | |
|
93 | data = '\n'.join([m.hgsub() for m in self.submoditer()]) | |
|
94 | mode = '' | |
|
95 | elif name == '.hgsubstate': | |
|
96 | data = '\n'.join([m.hgsubstate() for m in self.submoditer()]) | |
|
97 | mode = '' | |
|
98 | else: | |
|
99 | data = self.catfile(rev, "blob") | |
|
100 | mode = self.modecache[(name, rev)] | |
|
81 | 101 | return data, mode |
|
82 | 102 | |
|
103 | def submoditer(self): | |
|
104 | null = hex(nullid) | |
|
105 | for m in sorted(self.submodules, key=lambda p: p.path): | |
|
106 | if m.node != null: | |
|
107 | yield m | |
|
108 | ||
|
109 | def parsegitmodules(self, content): | |
|
110 | """Parse the formatted .gitmodules file, example file format: | |
|
111 | [submodule "sub"]\n | |
|
112 | \tpath = sub\n | |
|
113 | \turl = git://giturl\n | |
|
114 | """ | |
|
115 | self.submodules = [] | |
|
116 | c = config.config() | |
|
117 | # Each item in .gitmodules starts with \t, which can't be parsed | |
|
118 | c.parse('.gitmodules', content.replace('\t','')) | |
|
119 | for sec in c.sections(): | |
|
120 | s = c[sec] | |
|
121 | if 'url' in s and 'path' in s: | |
|
122 | self.submodules.append(submodule(s['path'], '', s['url'])) | |
|
123 | ||
|
124 | def retrievegitmodules(self, version): | |
|
125 | modules, ret = self.gitread("git show %s:%s" % (version, '.gitmodules')) | |
|
126 | if ret: | |
|
127 | raise util.Abort(_('cannot read submodules config file in %s') % | |
|
128 | version) | |
|
129 | self.parsegitmodules(modules) | |
|
130 | for m in self.submodules: | |
|
131 | node, ret = self.gitread("git rev-parse %s:%s" % (version, m.path)) | |
|
132 | if ret: | |
|
133 | continue | |
|
134 | m.node = node.strip() | |
|
135 | ||
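
Putting the pieces together, each .gitmodules entry becomes one line in each synthetic file; a toy round-trip (the class body mirrors the patch, the node value is made up):

    class submodule(object):            # as defined earlier in this patch
        def __init__(self, path, node, url):
            self.path, self.node, self.url = path, node, url
        def hgsub(self):
            return "%s = [git]%s" % (self.path, self.url)
        def hgsubstate(self):
            return "%s %s" % (self.node, self.path)

    m = submodule('sub', 'deadbeef' * 5, 'git://giturl')  # fake 40-char node
    print(m.hgsub())        # sub = [git]git://giturl
    print(m.hgsubstate())   # deadbeefdeadbeef...deadbeef sub
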
|
83 | 136 | def getchanges(self, version): |
|
84 | 137 | self.modecache = {} |
|
85 | 138 | fh = self.gitopen("git diff-tree -z --root -m -r %s" % version) |
|
86 | 139 | changes = [] |
|
87 | 140 | seen = set() |
|
88 | 141 | entry = None |
|
142 | subexists = False | |
|
89 | 143 | for l in fh.read().split('\x00'): |
|
90 | 144 | if not entry: |
|
91 | 145 | if not l.startswith(':'): |
@@ -97,15 +151,24 b' class convert_git(converter_source):' | |||
|
97 | 151 | seen.add(f) |
|
98 | 152 | entry = entry.split() |
|
99 | 153 | h = entry[3] |
|
100 | if entry[1] == '160000': | |
|
101 | raise util.Abort('git submodules are not supported!') | |
|
102 | 154 | p = (entry[1] == "100755") |
|
103 | 155 | s = (entry[1] == "120000") |
|
104 | self.modecache[(f, h)] = (p and "x") or (s and "l") or "" | |
|
105 | changes.append((f, h)) | |
|
156 | ||
|
157 | if f == '.gitmodules': | |
|
158 | subexists = True | |
|
159 | changes.append(('.hgsub', '')) | |
|
160 | elif entry[1] == '160000' or entry[0] == ':160000': | |
|
161 | subexists = True | |
|
162 | else: | |
|
163 | self.modecache[(f, h)] = (p and "x") or (s and "l") or "" | |
|
164 | changes.append((f, h)) | |
|
106 | 165 | entry = None |
|
107 | 166 | if fh.close(): |
|
108 | 167 | raise util.Abort(_('cannot read changes in %s') % version) |
|
168 | ||
|
169 | if subexists: | |
|
170 | self.retrievegitmodules(version) | |
|
171 | changes.append(('.hgsubstate', '')) | |
|
109 | 172 | return (changes, {}) |
|
110 | 173 | |
|
111 | 174 | def getcommit(self, version): |
@@ -110,7 +110,7 b' class mercurial_sink(converter_sink):' | |||
|
110 | 110 | |
|
111 | 111 | if missings: |
|
112 | 112 | self.after() |
|
113 | for pbranch, heads in missings.iteritems(): | |
|
113 | for pbranch, heads in sorted(missings.iteritems()): | |
|
114 | 114 | pbranchpath = os.path.join(self.path, pbranch) |
|
115 | 115 | prepo = hg.peer(self.ui, {}, pbranchpath) |
|
116 | 116 | self.ui.note(_('pulling from %s into %s\n') % (pbranch, branch)) |
@@ -219,9 +219,10 b' class mercurial_sink(converter_sink):' | |||
|
219 | 219 | return |
|
220 | 220 | |
|
221 | 221 | self.ui.status(_("updating bookmarks\n")) |
|
222 | destmarks = self.repo._bookmarks | |
|
222 | 223 | for bookmark in updatedbookmark: |
|
223 | self.repo._bookmarks[bookmark] = bin(updatedbookmark[bookmark]) | |
|
|
|
224 | bookmarks.write(self.repo) | |
|
|
|
224 | destmarks[bookmark] = bin(updatedbookmark[bookmark]) | |
|
225 | destmarks.write() | |
|
225 | 226 | |
|
226 | 227 | def hascommit(self, rev): |
|
227 | 228 | if rev not in self.repo and self.clonebranches: |
@@ -18,6 +18,7 b' from cStringIO import StringIO' | |||
|
18 | 18 | |
|
19 | 19 | from common import NoRepo, MissingTool, commit, encodeargs, decodeargs |
|
20 | 20 | from common import commandline, converter_source, converter_sink, mapfile |
|
21 | from common import makedatetimestamp | |
|
21 | 22 | |
|
22 | 23 | try: |
|
23 | 24 | from svn.core import SubversionException, Pool |
@@ -376,7 +377,7 b' class svn_source(converter_source):' | |||
|
376 | 377 | rpath = self.url.strip('/') |
|
377 | 378 | branchnames = svn.client.ls(rpath + '/' + quote(branches), |
|
378 | 379 | rev, False, self.ctx) |
|
379 | for branch in branchnames.keys(): | |
|
|
380 | for branch in sorted(branchnames): | |
|
380 | 381 | module = '%s/%s/%s' % (oldmodule, branches, branch) |
|
381 | 382 | if not isdir(module, self.last_changed): |
|
382 | 383 | continue |
@@ -802,6 +803,8 b' class svn_source(converter_source):' | |||
|
802 | 803 | # ISO-8601 conformant |
|
803 | 804 | # '2007-01-04T17:35:00.902377Z' |
|
804 | 805 | date = util.parsedate(date[:19] + " UTC", ["%Y-%m-%dT%H:%M:%S"]) |
|
806 | if self.ui.configbool('convert', 'localtimezone'): | |
|
807 | date = makedatetimestamp(date[0]) | |
|
805 | 808 | |
|
806 | 809 | log = message and self.recode(message) or '' |
|
807 | 810 | author = author and self.recode(author) or '' |
@@ -307,7 +307,7 b' def reposetup(ui, repo):' | |||
|
307 | 307 | eolmtime = 0 |
|
308 | 308 | |
|
309 | 309 | if eolmtime > cachemtime: |
|
310 | ui.debug("eol: detected change in .hgeol\n") | |
|
310 | self.ui.debug("eol: detected change in .hgeol\n") | |
|
311 | 311 | wlock = None |
|
312 | 312 | try: |
|
313 | 313 | wlock = self.wlock() |
@@ -39,7 +39,6 b" testedwith = 'internal'" | |||
|
39 | 39 | _('show changesets within the given named branch'), _('BRANCH')), |
|
40 | 40 | ('P', 'prune', [], |
|
41 | 41 | _('do not display revision or any of its ancestors'), _('REV')), |
|
42 | ('', 'hidden', False, _('show hidden changesets (DEPRECATED)')), | |
|
43 | 42 | ] + commands.logopts + commands.walkopts, |
|
44 | 43 | _('[OPTION]... [FILE]')) |
|
45 | 44 | def graphlog(ui, repo, *pats, **opts): |
@@ -98,9 +98,9 b' def catcommit(ui, repo, n, prefix, ctx=N' | |||
|
98 | 98 | if ctx is None: |
|
99 | 99 | ctx = repo[n] |
|
100 | 100 | # use ctx.node() instead ?? |
|
101 | ui.write("tree %s\n" % short(ctx.changeset()[0])) | |
|
101 | ui.write(("tree %s\n" % short(ctx.changeset()[0]))) | |
|
102 | 102 | for p in ctx.parents(): |
|
103 | ui.write("parent %s\n" % p) | |
|
103 | ui.write(("parent %s\n" % p)) | |
|
104 | 104 | |
|
105 | 105 | date = ctx.date() |
|
106 | 106 | description = ctx.description().replace("\0", "") |
@@ -108,12 +108,13 b' def catcommit(ui, repo, n, prefix, ctx=N' | |||
|
108 | 108 | if lines and lines[-1].startswith('committer:'): |
|
109 | 109 | committer = lines[-1].split(': ')[1].rstrip() |
|
110 | 110 | else: |
|
111 | committer = ctx.user() | |
|
|
111 | committer = "" | |
|
112 | 112 | |
|
113 | ui.write("author %s %s %s\n" % (ctx.user(), int(date[0]), date[1])) | |
|
114 | ui.write("committer %s %s %s\n" % (committer, int(date[0]), date[1])) | |
|
115 | ui.write("revision %d\n" % ctx.rev()) | |
|
116 | ui.write("branch %s\n\n" % ctx.branch()) | |
|
|
113 | ui.write(("author %s %s %s\n" % (ctx.user(), int(date[0]), date[1]))) | |
|
114 | if committer != '': | |
|
115 | ui.write(("committer %s %s %s\n" % (committer, int(date[0]), date[1]))) | |
|
116 | ui.write(("revision %d\n" % ctx.rev())) | |
|
117 | ui.write(("branch %s\n\n" % ctx.branch())) | |
|
117 | 118 | |
|
118 | 119 | if prefix != "": |
|
119 | 120 | ui.write("%s%s\n" % (prefix, |
@@ -302,7 +303,7 b' def revlist(ui, repo, *revs, **opts):' | |||
|
302 | 303 | def config(ui, repo, **opts): |
|
303 | 304 | """print extension options""" |
|
304 | 305 | def writeopt(name, value): |
|
305 | ui.write('k=%s\nv=%s\n' % (name, value)) | |
|
306 | ui.write(('k=%s\nv=%s\n' % (name, value))) | |
|
306 | 307 | |
|
307 | 308 | writeopt('vdiff', ui.config('hgk', 'vdiff', '')) |
|
308 | 309 |
@@ -50,7 +50,7 b' def pygmentize(field, fctx, style, tmpl)' | |||
|
50 | 50 | colorized = highlight(text, lexer, formatter) |
|
51 | 51 | # strip wrapping div |
|
52 | 52 | colorized = colorized[:colorized.find('\n</pre>')] |
|
53 | colorized = colorized[colorized.find('<pre>')+5:] | |
|
53 | colorized = colorized[colorized.find('<pre>') + 5:] | |
|
54 | 54 | coloriter = (s.encode(encoding.encoding, 'replace') |
|
55 | 55 | for s in colorized.splitlines()) |
|
56 | 56 |
@@ -144,7 +144,6 b' except ImportError:' | |||
|
144 | 144 | import pickle |
|
145 | 145 | import os |
|
146 | 146 | |
|
147 | from mercurial import bookmarks | |
|
148 | 147 | from mercurial import cmdutil |
|
149 | 148 | from mercurial import discovery |
|
150 | 149 | from mercurial import error |
@@ -177,6 +176,31 b' editcomment = _("""# Edit history betwee' | |||
|
177 | 176 | # |
|
178 | 177 | """) |
|
179 | 178 | |
|
179 | def commitfuncfor(repo, src): | |
|
180 | """Build a commit function for the replacement of <src> | |
|
181 | ||
|
182 | This function ensures we apply the same treatment to all changesets. | |
|
183 | ||
|
184 | - Add a 'histedit_source' entry in extra. | |
|
185 | ||
|
186 | Note that fold has its own separate logic because its handling is a bit | |
|
187 | different and not easily factored out of the fold method. | |
|
188 | """ | |
|
189 | phasemin = src.phase() | |
|
190 | def commitfunc(**kwargs): | |
|
191 | phasebackup = repo.ui.backupconfig('phases', 'new-commit') | |
|
192 | try: | |
|
193 | repo.ui.setconfig('phases', 'new-commit', phasemin) | |
|
194 | extra = kwargs.get('extra', {}).copy() | |
|
195 | extra['histedit_source'] = src.hex() | |
|
196 | kwargs['extra'] = extra | |
|
197 | return repo.commit(**kwargs) | |
|
198 | finally: | |
|
199 | repo.ui.restoreconfig(phasebackup) | |
|
200 | return commitfunc | |
|
201 | ||
|
202 | ||
|
203 | ||
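
The backupconfig/setconfig/restoreconfig dance is the usual way to scope a config override to one operation; isolated from the closure above, the idiom is (a sketch using the same ui methods the patch relies on):

    def withnewcommitphase(repo, phasemin, fn):
        # temporarily force phases.new-commit down to phasemin
        backup = repo.ui.backupconfig('phases', 'new-commit')
        try:
            repo.ui.setconfig('phases', 'new-commit', phasemin)
            return fn()
        finally:
            repo.ui.restoreconfig(backup)
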
|
180 | 204 | def applychanges(ui, repo, ctx, opts): |
|
181 | 205 | """Merge changeset from ctx (only) in the current working directory""" |
|
182 | 206 | wcpar = repo.dirstate.parents()[0] |
@@ -255,7 +279,7 b' def collapse(repo, first, last, commitop' | |||
|
255 | 279 | message = first.description() |
|
256 | 280 | user = commitopts.get('user') |
|
257 | 281 | date = commitopts.get('date') |
|
258 | extra = first.extra() | |
|
|
282 | extra = commitopts.get('extra') | |
|
259 | 283 | |
|
260 | 284 | parents = (first.p1().node(), first.p2().node()) |
|
261 | 285 | new = context.memctx(repo, |
@@ -280,8 +304,9 b' def pick(ui, repo, ctx, ha, opts):' | |||
|
280 | 304 | raise util.Abort(_('Fix up the change and run ' |
|
281 | 305 | 'hg histedit --continue')) |
|
282 | 306 | # drop the second merge parent |
|
283 | n = repo.commit(text=oldctx.description(), user=oldctx.user(), | |
|
284 | date=oldctx.date(), extra=oldctx.extra()) | |
|
307 | commit = commitfuncfor(repo, oldctx) | |
|
308 | n = commit(text=oldctx.description(), user=oldctx.user(), | |
|
309 | date=oldctx.date(), extra=oldctx.extra()) | |
|
285 | 310 | if n is None: |
|
286 | 311 | ui.warn(_('%s: empty changeset\n') |
|
287 | 312 | % node.hex(ha)) |
@@ -332,7 +357,19 b' def finishfold(ui, repo, ctx, oldctx, ne' | |||
|
332 | 357 | commitopts['message'] = newmessage |
|
333 | 358 | # date |
|
334 | 359 | commitopts['date'] = max(ctx.date(), oldctx.date()) |
|
335 | n = collapse(repo, ctx, repo[newnode], commitopts) | |
|
360 | extra = ctx.extra().copy() | |
|
361 | # histedit_source | |
|
362 | # note: ctx is likely a temporary commit but that's the best we can do here | |
|
363 | # This is sufficient to solve issue3681 anyway | |
|
364 | extra['histedit_source'] = '%s,%s' % (ctx.hex(), oldctx.hex()) | |
|
365 | commitopts['extra'] = extra | |
|
366 | phasebackup = repo.ui.backupconfig('phases', 'new-commit') | |
|
367 | try: | |
|
368 | phasemin = max(ctx.phase(), oldctx.phase()) | |
|
369 | repo.ui.setconfig('phases', 'new-commit', phasemin) | |
|
370 | n = collapse(repo, ctx, repo[newnode], commitopts) | |
|
371 | finally: | |
|
372 | repo.ui.restoreconfig(phasebackup) | |
|
336 | 373 | if n is None: |
|
337 | 374 | return ctx, [] |
|
338 | 375 | hg.update(repo, n) |
@@ -357,8 +394,9 b' def message(ui, repo, ctx, ha, opts):' | |||
|
357 | 394 | 'hg histedit --continue')) |
|
358 | 395 | message = oldctx.description() + '\n' |
|
359 | 396 | message = ui.edit(message, ui.username()) |
|
360 | new = repo.commit(text=message, user=oldctx.user(), date=oldctx.date(), | |
|
361 | extra=oldctx.extra()) | |
|
397 | commit = commitfuncfor(repo, oldctx) | |
|
398 | new = commit(text=message, user=oldctx.user(), date=oldctx.date(), | |
|
399 | extra=oldctx.extra()) | |
|
362 | 400 | newctx = repo[new] |
|
363 | 401 | if oldctx.node() != newctx.node(): |
|
364 | 402 | return newctx, [(oldctx.node(), (new,))] |
@@ -559,9 +597,10 b' def bootstrapcontinue(ui, repo, parentct' | |||
|
559 | 597 | editor = cmdutil.commitforceeditor |
|
560 | 598 | else: |
|
561 | 599 | editor = False |
|
562 | new = repo.commit(text=message, user=ctx.user(), | |
|
563 | date=ctx.date(), extra=ctx.extra(), | |
|
564 | editor=editor) | |
|
|
|
600 | commit = commitfuncfor(repo, ctx) | |
|
601 | new = commit(text=message, user=ctx.user(), | |
|
602 | date=ctx.date(), extra=ctx.extra(), | |
|
603 | editor=editor) | |
|
565 | 604 | if new is not None: |
|
566 | 605 | newchildren.append(new) |
|
567 | 606 | |
@@ -594,7 +633,8 b' def between(repo, old, new, keep):' | |||
|
594 | 633 | When keep is false, the specified set can't have children.""" |
|
595 | 634 | ctxs = list(repo.set('%n::%n', old, new)) |
|
596 | 635 | if ctxs and not keep: |
|
597 | if repo.revs('(%ld::) - (%ld + hidden())', ctxs, ctxs): | |
|
636 | if (not obsolete._enabled and | |
|
637 | repo.revs('(%ld::) - (%ld)', ctxs, ctxs)): | |
|
598 | 638 | raise util.Abort(_('cannot edit history that would orphan nodes')) |
|
599 | 639 | root = ctxs[0] # list is already sorted by repo.set |
|
600 | 640 | if not root.phase(): |
@@ -720,9 +760,9 b' def movebookmarks(ui, repo, mapping, old' | |||
|
720 | 760 | # if nothing got rewritten there is not purpose for this function |
|
721 | 761 | return |
|
722 | 762 | moves = [] |
|
723 | for bk, old in repo._bookmarks.iteritems(): | |
|
763 | for bk, old in sorted(repo._bookmarks.iteritems()): | |
|
724 | 764 | if old == oldtopmost: |
|
725 | # special case ensure bookmark stay on tip. | |
|
|
765 | # special case ensure bookmark stay on tip. | |
|
726 | 766 | # |
|
727 | 767 | # This is arguably a feature and we may only want that for the |
|
728 | 768 | # active bookmark. But the behavior is kept compatible with the old |
@@ -740,12 +780,13 b' def movebookmarks(ui, repo, mapping, old' | |||
|
740 | 780 | # nothing to move |
|
741 | 781 | moves.append((bk, new[-1])) |
|
742 | 782 | if moves: |
|
783 | marks = repo._bookmarks | |
|
743 | 784 | for mark, new in moves: |
|
744 | old = repo._bookmarks[mark] | |
|
|
785 | old = marks[mark] | |
|
745 | 786 | ui.note(_('histedit: moving bookmarks %s from %s to %s\n') |
|
746 | 787 | % (mark, node.short(old), node.short(new))) |
|
747 | repo._bookmarks[mark] = new | |
|
|
|
748 | bookmarks.write(repo) | |
|
|
|
788 | marks[mark] = new | |
|
789 | marks.write() | |
|
749 | 790 | |
|
750 | 791 | def cleanupnode(ui, repo, name, nodes): |
|
751 | 792 | """strip a group of nodes from the repository |
@@ -72,7 +72,7 b' class event(object):' | |||
|
72 | 72 | |
|
73 | 73 | def __repr__(self): |
|
74 | 74 | r = repr(self.raw) |
|
75 | return 'event(path=' + repr(self.path) + ', ' + r[r.find('(')+1:] | |
|
75 | return 'event(path=' + repr(self.path) + ', ' + r[r.find('(') + 1:] | |
|
76 | 76 | |
|
77 | 77 | |
|
78 | 78 | _event_props = { |
@@ -405,14 +405,7 b' class socketlistener(server.socketlisten' | |||
|
405 | 405 | |
|
406 | 406 | def shutdown(self): |
|
407 | 407 | self.sock.close() |
|
408 | try: | |
|
409 | os.unlink(self.sockpath) | |
|
410 | if self.realsockpath: | |
|
411 | os.unlink(self.realsockpath) | |
|
412 | os.rmdir(os.path.dirname(self.realsockpath)) | |
|
413 | except OSError, err: | |
|
414 | if err.errno != errno.ENOENT: | |
|
415 | raise | |
|
408 | self.sock.cleanup() | |
|
416 | 409 | |
|
417 | 410 | def answer_stat_query(self, cs): |
|
418 | 411 | if self.repowatcher.timeout: |
@@ -6,7 +6,7 b'' | |||
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | from mercurial.i18n import _ |
|
9 | from mercurial import cmdutil, osutil, util | |
|
9 | from mercurial import cmdutil, posix, osutil, util | |
|
10 | 10 | import common |
|
11 | 11 | |
|
12 | 12 | import errno |
@@ -15,7 +15,6 b' import socket' | |||
|
15 | 15 | import stat |
|
16 | 16 | import struct |
|
17 | 17 | import sys |
|
18 | import tempfile | |
|
19 | 18 | |
|
20 | 19 | class AlreadyStartedException(Exception): |
|
21 | 20 | pass |
@@ -330,42 +329,15 b' class socketlistener(object):' | |||
|
330 | 329 | def __init__(self, ui, root, repowatcher, timeout): |
|
331 | 330 | self.ui = ui |
|
332 | 331 | self.repowatcher = repowatcher |
|
333 | self.sock = socket.socket(socket.AF_UNIX) | |
|
334 | self.sockpath = join(root, '.hg/inotify.sock') | |
|
335 | ||
|
336 | self.realsockpath = self.sockpath | |
|
337 | if os.path.islink(self.sockpath): | |
|
338 | if os.path.exists(self.sockpath): | |
|
339 | self.realsockpath = os.readlink(self.sockpath) | |
|
340 | else: | |
|
341 | raise util.Abort('inotify-server: cannot start: ' | |
|
342 | '.hg/inotify.sock is a broken symlink') | |
|
343 | 332 | try: |
|
344 | self.sock.bind(self.sockpath) | |
|
|
345 | except socket.error, err: | |
|
333 | self.sock = posix.unixdomainserver( | |
|
334 | lambda p: os.path.join(root, '.hg', p), | |
|
335 | 'inotify') | |
|
336 | except (OSError, socket.error), err: | |
|
346 | 337 | if err.args[0] == errno.EADDRINUSE: |
|
347 | raise AlreadyStartedException(_('cannot start: socket is ' | |
|
|
348 | 'already bound')) | |
|
349 | if err.args[0] == "AF_UNIX path too long": | |
|
350 | tempdir = tempfile.mkdtemp(prefix="hg-inotify-") | |
|
351 | self.realsockpath = os.path.join(tempdir, "inotify.sock") | |
|
352 | try: | |
|
353 | self.sock.bind(self.realsockpath) | |
|
354 | os.symlink(self.realsockpath, self.sockpath) | |
|
355 | except (OSError, socket.error), inst: | |
|
356 | try: | |
|
357 | os.unlink(self.realsockpath) | |
|
358 | except OSError: | |
|
359 | pass | |
|
360 | os.rmdir(tempdir) | |
|
361 | if inst.errno == errno.EEXIST: | |
|
362 | raise AlreadyStartedException(_('cannot start: tried ' | |
|
363 | 'linking .hg/inotify.sock to a temporary socket but' | |
|
364 | ' .hg/inotify.sock already exists')) | |
|
365 | raise | |
|
366 | else: | |
|
367 | raise | |
|
368 | self.sock.listen(5) | |
|
338 | raise AlreadyStartedException(_('cannot start: ' | |
|
339 | 'socket is already bound')) | |
|
340 | raise | |
|
369 | 341 | self.fileno = self.sock.fileno |
|
370 | 342 | |
|
371 | 343 | def answer_stat_query(self, cs): |
@@ -26,14 +26,8 b' class StoreError(Exception):' | |||
|
26 | 26 | self.detail = detail |
|
27 | 27 | |
|
28 | 28 | def longmessage(self): |
|
29 | if self.url: | |
|
30 | return ('%s: %s\n' | |
|
31 | '(failed URL: %s)\n' | |
|
32 | % (self.filename, self.detail, self.url)) | |
|
33 | else: | |
|
34 | return ('%s: %s\n' | |
|
35 | '(no default or default-push path set in hgrc)\n' | |
|
36 | % (self.filename, self.detail)) | |
|
29 | return (_("error getting %s from %s for %s: %s\n") % | |
|
30 | (self.hash, self.url, self.filename, self.detail)) | |
|
37 | 31 | |
|
38 | 32 | def __str__(self): |
|
39 | 33 | return "%s: %s" % (self.url, self.detail) |
@@ -383,6 +383,13 b' def verifylfiles(ui, repo, all=False, co' | |||
|
383 | 383 | store = basestore._openstore(repo) |
|
384 | 384 | return store.verify(revs, contents=contents) |
|
385 | 385 | |
|
386 | def debugdirstate(ui, repo): | |
|
387 | '''Show basic information for the largefiles dirstate''' | |
|
388 | lfdirstate = lfutil.openlfdirstate(ui, repo) | |
|
389 | for file_, ent in sorted(lfdirstate._map.iteritems()): | |
|
390 | mode = '%3o' % (ent[1] & 0777 & ~util.umask) | |
|
391 | ui.write("%c %s %10d %s\n" % (ent[0], mode, ent[2], file_)) | |
|
392 | ||
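
Each emitted line packs state, permission bits (masked by the process umask) and size; rendering one entry standalone, with invented values:

    ent = ('n', 0644, 24)            # (state, mode, size) as in the map
    umask = 0022                     # assumed process umask
    mode = '%3o' % (ent[1] & 0777 & ~umask)
    print("%c %s %10d %s" % (ent[0], mode, ent[2], 'sub/large.bin'))
    # n 644         24 sub/large.bin
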
|
386 | 393 | def cachelfiles(ui, repo, node, filelist=None): |
|
387 | 394 | '''cachelfiles ensures that all largefiles needed by the specified revision |
|
388 | 395 | are present in the repository's largefile cache. |
@@ -18,43 +18,10 b' from mercurial import dirstate, httpconn' | |||
|
18 | 18 | from mercurial.i18n import _ |
|
19 | 19 | |
|
20 | 20 | shortname = '.hglf' |
|
21 | shortnameslash = shortname + '/' | |
|
21 | 22 | longname = 'largefiles' |
|
22 | 23 | |
|
23 | 24 | |
|
24 | # -- Portability wrappers ---------------------------------------------- | |
|
25 | ||
|
26 | def dirstatewalk(dirstate, matcher, unknown=False, ignored=False): | |
|
27 | return dirstate.walk(matcher, [], unknown, ignored) | |
|
28 | ||
|
29 | def repoadd(repo, list): | |
|
30 | add = repo[None].add | |
|
31 | return add(list) | |
|
32 | ||
|
33 | def reporemove(repo, list, unlink=False): | |
|
34 | def remove(list, unlink): | |
|
35 | wlock = repo.wlock() | |
|
36 | try: | |
|
37 | if unlink: | |
|
38 | for f in list: | |
|
39 | try: | |
|
40 | util.unlinkpath(repo.wjoin(f)) | |
|
41 | except OSError, inst: | |
|
42 | if inst.errno != errno.ENOENT: | |
|
43 | raise | |
|
44 | repo[None].forget(list) | |
|
45 | finally: | |
|
46 | wlock.release() | |
|
47 | return remove(list, unlink=unlink) | |
|
48 | ||
|
49 | def repoforget(repo, list): | |
|
50 | forget = repo[None].forget | |
|
51 | return forget(list) | |
|
52 | ||
|
53 | def findoutgoing(repo, remote, force): | |
|
54 | from mercurial import discovery | |
|
55 | outgoing = discovery.findcommonoutgoing(repo, remote.peer(), force=force) | |
|
56 | return outgoing.missing | |
|
57 | ||
|
58 | 25 | # -- Private worker functions ------------------------------------------ |
|
59 | 26 | |
|
60 | 27 | def getminsize(ui, assumelfiles, opt, default=10): |
@@ -139,24 +106,26 b' class largefilesdirstate(dirstate.dirsta' | |||
|
139 | 106 | return super(largefilesdirstate, self).forget(unixpath(f)) |
|
140 | 107 | def normallookup(self, f): |
|
141 | 108 | return super(largefilesdirstate, self).normallookup(unixpath(f)) |
|
109 | def _ignore(self): | |
|
110 | return False | |
|
142 | 111 | |
|
143 | 112 | def openlfdirstate(ui, repo, create=True): |
|
144 | 113 | ''' |
|
145 | 114 | Return a dirstate object that tracks largefiles: i.e. its root is |
|
146 | 115 | the repo root, but it is saved in .hg/largefiles/dirstate. |
|
147 | 116 | ''' |
|
148 | admin = repo.join(longname) | |
|
|
|
149 | opener = scmutil.opener(admin) | |
|
|
117 | lfstoredir = repo.join(longname) | |
|
118 | opener = scmutil.opener(lfstoredir) | |
|
150 | 119 | lfdirstate = largefilesdirstate(opener, ui, repo.root, |
|
151 | 120 | repo.dirstate._validate) |
|
152 | 121 | |
|
153 | 122 | # If the largefiles dirstate does not exist, populate and create |
|
154 | 123 | # it. This ensures that we create it on the first meaningful |
|
155 | 124 | # largefiles operation in a new clone. |
|
156 | if create and not os.path.exists(os.path.join(admin, 'dirstate')): | |
|
|
157 | util.makedirs(admin) | |
|
|
125 | if create and not os.path.exists(os.path.join(lfstoredir, 'dirstate')): | |
|
126 | util.makedirs(lfstoredir) | |
|
158 | 127 | matcher = getstandinmatcher(repo) |
|
159 | for standin in dirstatewalk(repo.dirstate, matcher): | |
|
|
128 | for standin in repo.dirstate.walk(matcher, [], False, False): | |
|
160 | 129 | lfile = splitstandin(standin) |
|
161 | 130 | hash = readstandin(repo, lfile) |
|
162 | 131 | lfdirstate.normallookup(lfile) |
@@ -173,8 +142,11 b' def lfdirstatestatus(lfdirstate, repo, r' | |||
|
173 | 142 | s = lfdirstate.status(match, [], False, False, False) |
|
174 | 143 | unsure, modified, added, removed, missing, unknown, ignored, clean = s |
|
175 | 144 | for lfile in unsure: |
|
176 | if repo[rev][standin(lfile)].data().strip() != \ | |
|
177 | hashfile(repo.wjoin(lfile)): | |
|
|
|
145 | try: | |
|
146 | fctx = repo[rev][standin(lfile)] | |
|
147 | except LookupError: | |
|
148 | fctx = None | |
|
149 | if not fctx or fctx.data().strip() != hashfile(repo.wjoin(lfile)): | |
|
178 | 150 | modified.append(lfile) |
|
179 | 151 | else: |
|
180 | 152 | clean.append(lfile) |
@@ -250,7 +222,7 b' def linktousercache(repo, hash):' | |||
|
250 | 222 | |
|
251 | 223 | def getstandinmatcher(repo, pats=[], opts={}): |
|
252 | 224 | '''Return a match object that applies pats to the standin directory''' |
|
253 | standindir = repo.pathto(shortname) | |
|
|
225 | standindir = repo.wjoin(shortname) | |
|
254 | 226 | if pats: |
|
255 | 227 | # patterns supplied: search standin directory relative to current dir |
|
256 | 228 | cwd = repo.getcwd() |
@@ -264,19 +236,11 b' def getstandinmatcher(repo, pats=[], opt' | |||
|
264 | 236 | pats = [standindir] |
|
265 | 237 | else: |
|
266 | 238 | # no patterns and no standin dir: return matcher that matches nothing |
|
267 | match = match_.match(repo.root, None, [], exact=True) | |
|
|
|
268 | match.matchfn = lambda f: False | |
|
269 | return match | |
|
270 | return getmatcher(repo, pats, opts, showbad=False) | |
|
239 | return match_.match(repo.root, None, [], exact=True) | |
|
271 | 240 | |
|
272 | def getmatcher(repo, pats=[], opts={}, showbad=True): | |
|
273 | '''Wrapper around scmutil.match() that adds showbad: if false, | |
|
274 | neuter the match object's bad() method so it does not print any | |
|
275 | warnings about missing files or directories.''' | |
|
241 | # no warnings about missing files or directories | |
|
276 | 242 | match = scmutil.match(repo[None], pats, opts) |
|
277 | ||
|
278 | if not showbad: | |
|
279 | match.bad = lambda f, msg: None | |
|
243 | match.bad = lambda f, msg: None | |
|
280 | 244 | return match |
|
281 | 245 | |
|
282 | 246 | def composestandinmatcher(repo, rmatcher): |
@@ -296,17 +260,17 b' def standin(filename):' | |||
|
296 | 260 | file.''' |
|
297 | 261 | # Notes: |
|
298 | 262 | # 1) Some callers want an absolute path, but for instance addlargefiles |
|
299 | # needs it repo-relative so it can be passed to repoadd(). So leave | |
|
|
300 | # it up to the caller to use repo.wjoin() to get an absolute path. | |
|
263 | # needs it repo-relative so it can be passed to repo[None].add(). So | |
|
264 | # leave it up to the caller to use repo.wjoin() to get an absolute path. | |
|
301 | 265 | # 2) Join with '/' because that's what dirstate always uses, even on |
|
302 | 266 | # Windows. Change existing separator to '/' first in case we are |
|
303 | 267 | # passed filenames from an external source (like the command line). |
|
304 | return shortname + '/' + util.pconvert(filename) | |
|
|
268 | return shortnameslash + util.pconvert(filename) | |
|
305 | 269 | |
|
306 | 270 | def isstandin(filename): |
|
307 | 271 | '''Return true if filename is a big file standin. filename must be |
|
308 | 272 | in Mercurial's internal form (slash-separated).''' |
|
309 | return filename.startswith(shortname + '/') | |
|
|
273 | return filename.startswith(shortnameslash) | |
|
310 | 274 | |
|
311 | 275 | def splitstandin(filename): |
|
312 | 276 | # Split on / because that's what dirstate always uses, even on Windows. |
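
The shortnameslash refactoring leaves the path mapping unchanged while avoiding repeated concatenation; a standalone illustration (pconvert stands in for util.pconvert):

    shortname = '.hglf'
    shortnameslash = shortname + '/'

    def pconvert(path):                     # util.pconvert equivalent
        return path.replace('\\', '/')

    def standin(filename):
        return shortnameslash + pconvert(filename)

    def isstandin(filename):
        return filename.startswith(shortnameslash)

    print(standin('sub\\big.bin'))          # .hglf/sub/big.bin
    print(isstandin('.hglf/sub/big.bin'))   # True
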
@@ -435,7 +399,7 b' def unixpath(path):' | |||
|
435 | 399 | |
|
436 | 400 | def islfilesrepo(repo): |
|
437 | 401 | if ('largefiles' in repo.requirements and |
|
438 | util.any(shortname + '/' in f[0] for f in repo.store.datafiles())): | |
|
|
402 | util.any(shortnameslash in f[0] for f in repo.store.datafiles())): | |
|
439 | 403 | return True |
|
440 | 404 | |
|
441 | 405 | return util.any(openlfdirstate(repo.ui, repo, False)) |
@@ -455,9 +419,13 b' def getcurrentheads(repo):' | |||
|
455 | 419 | def getstandinsstate(repo): |
|
456 | 420 | standins = [] |
|
457 | 421 | matcher = getstandinmatcher(repo) |
|
458 | for standin in dirstatewalk(repo.dirstate, matcher): | |
|
|
422 | for standin in repo.dirstate.walk(matcher, [], False, False): | |
|
459 | 423 | lfile = splitstandin(standin) |
|
460 | standins.append((lfile, readstandin(repo, lfile))) | |
|
424 | try: | |
|
425 | hash = readstandin(repo, lfile) | |
|
426 | except IOError: | |
|
427 | hash = None | |
|
428 | standins.append((lfile, hash)) | |
|
461 | 429 | return standins |
|
462 | 430 | |
|
463 | 431 | def getlfilestoupdate(oldstandins, newstandins): |
@@ -22,9 +22,8 b' class localstore(basestore.basestore):' | |||
|
22 | 22 | the user cache.''' |
|
23 | 23 | |
|
24 | 24 | def __init__(self, ui, repo, remote): |
|
25 | url = os.path.join(remote.local().path, '.hg', lfutil.longname) | |
|
26 | super(localstore, self).__init__(ui, repo, util.expandpath(url)) | |
|
27 | 25 | self.remote = remote.local() |
|
26 | super(localstore, self).__init__(ui, repo, self.remote.url()) | |
|
28 | 27 | |
|
29 | 28 | def put(self, source, hash): |
|
30 | 29 | util.makedirs(os.path.dirname(lfutil.storepath(self.remote, hash))) |
@@ -46,7 +45,7 b' class localstore(basestore.basestore):' | |||
|
46 | 45 | elif lfutil.inusercache(self.ui, hash): |
|
47 | 46 | path = lfutil.usercachepath(self.ui, hash) |
|
48 | 47 | else: |
|
49 | raise basestore.StoreError(filename, hash, '', | |
|
|
48 | raise basestore.StoreError(filename, hash, self.url, | |
|
50 | 49 | _("can't get file locally")) |
|
51 | 50 | fd = open(path, 'rb') |
|
52 | 51 | try: |
@@ -12,7 +12,7 b' import os' | |||
|
12 | 12 | import copy |
|
13 | 13 | |
|
14 | 14 | from mercurial import hg, commands, util, cmdutil, scmutil, match as match_, \ |
|
15 | node, archival, error, merge | |
|
15 | node, archival, error, merge, discovery | |
|
16 | 16 | from mercurial.i18n import _ |
|
17 | 17 | from mercurial.node import hex |
|
18 | 18 | from hgext import rebase |
@@ -116,7 +116,7 b' def addlargefiles(ui, repo, *pats, **opt' | |||
|
116 | 116 | lfdirstate.add(f) |
|
117 | 117 | lfdirstate.write() |
|
118 | 118 | bad += [lfutil.splitstandin(f) |
|
119 | for f in lfutil.repoadd(repo, standins) | |
|
|
119 | for f in repo[None].add(standins) | |
|
120 | 120 | if f in m.files()] |
|
121 | 121 | finally: |
|
122 | 122 | wlock.release() |
@@ -137,21 +137,23 b' def removelargefiles(ui, repo, *pats, **' | |||
|
137 | 137 | if lfutil.standin(f) in manifest] |
|
138 | 138 | for list in [s[0], s[1], s[3], s[6]]] |
|
139 | 139 | |
|
140 | def warn(files, reason): | |
|
|
140 | def warn(files, msg): | |
|
141 | 141 | for f in files: |
|
142 | ui.warn(_('not removing %s: %s (use forget to undo)\n') | |
|
143 | % (m.rel(f), reason)) | |
|
142 | ui.warn(msg % m.rel(f)) | |
|
144 | 143 | return int(len(files) > 0) |
|
145 | 144 | |
|
146 | 145 | result = 0 |
|
147 | 146 | |
|
148 | 147 | if after: |
|
149 | 148 | remove, forget = deleted, [] |
|
150 | result = warn(modified + added + clean, _('file still exists')) | 
|
149 | result = warn(modified + added + clean, | |
|
150 | _('not removing %s: file still exists\n')) | |
|
151 | 151 | else: |
|
152 | 152 | remove, forget = deleted + clean, [] |
|
153 | result = warn(modified, _('file is modified')) | 
|
154 | result = warn(added, _('file has been marked for add')) or result | |
|
153 | result = warn(modified, _('not removing %s: file is modified (use -f' | |
|
154 | ' to force removal)\n')) | |
|
155 | result = warn(added, _('not removing %s: file has been marked for add' | |
|
156 | ' (use forget to undo)\n')) or result | |
|
155 | 157 | |
|
156 | 158 | for f in sorted(remove + forget): |
|
157 | 159 | if ui.verbose or not m.exact(f): |
@@ -168,19 +170,18 b' def removelargefiles(ui, repo, *pats, **' | |||
|
168 | 170 | # are removing the file. |
|
169 | 171 | if getattr(repo, "_isaddremove", False): |
|
170 | 172 | ui.status(_('removing %s\n') % f) |
|
171 | if os.path.exists(repo.wjoin(f)): | 
|
172 | util.unlinkpath(repo.wjoin(f)) | |
|
173 | util.unlinkpath(repo.wjoin(f), ignoremissing=True) | |
|
173 | 174 | lfdirstate.remove(f) |
|
174 | 175 | lfdirstate.write() |
|
175 | 176 | forget = [lfutil.standin(f) for f in forget] |
|
176 | 177 | remove = [lfutil.standin(f) for f in remove] |
|
177 | lfutil.repoforget(repo, forget) | 
|
178 | repo[None].forget(forget) | |
|
178 | 179 | # If this is being called by addremove, let the original addremove |
|
179 | 180 | # function handle this. |
|
180 | 181 | if not getattr(repo, "_isaddremove", False): |
|
181 | lfutil.reporemove(repo, remove, unlink=True) | |
|
182 | else: | |
|
183 | lfutil.reporemove(repo, remove, unlink=False) | |
|
182 | for f in remove: | |
|
183 | util.unlinkpath(repo.wjoin(f), ignoremissing=True) | |
|
184 | repo[None].forget(remove) | |
|
184 | 185 | finally: |
|
185 | 186 | wlock.release() |
|
186 | 187 | |
@@ -238,11 +239,34 b' def overridedirty(orig, repo, ignoreupda' | |||
|
238 | 239 | repo._repo.lfstatus = False |
|
239 | 240 | |
|
240 | 241 | def overridelog(orig, ui, repo, *pats, **opts): |
|
242 | def overridematch(ctx, pats=[], opts={}, globbed=False, | |
|
243 | default='relpath'): | |
|
244 | """Matcher that merges root directory with .hglf, suitable for log. | |
|
245 | It is still possible to match .hglf directly. | |
|
246 | For any listed files run log on the standin too. | |
|
247 | matchfn tries both the given filename and with .hglf stripped. | |
|
248 | """ | |
|
249 | match = oldmatch(ctx, pats, opts, globbed, default) | |
|
250 | m = copy.copy(match) | |
|
251 | standins = [lfutil.standin(f) for f in m._files] | |
|
252 | m._files.extend(standins) | |
|
253 | m._fmap = set(m._files) | |
|
254 | origmatchfn = m.matchfn | |
|
255 | def lfmatchfn(f): | |
|
256 | lf = lfutil.splitstandin(f) | |
|
257 | if lf is not None and origmatchfn(lf): | |
|
258 | return True | |
|
259 | r = origmatchfn(f) | |
|
260 | return r | |
|
261 | m.matchfn = lfmatchfn | |
|
262 | return m | |
|
263 | oldmatch = installmatchfn(overridematch) | |
|
241 | 264 | try: |
|
242 | 265 | repo.lfstatus = True |
|
243 | 266 | return orig(ui, repo, *pats, **opts) |
|
244 | 267 | finally: |
|
245 | 268 | repo.lfstatus = False |
|
269 | restorematchfn() | |
|
246 | 270 | |
|
247 | 271 | def overrideverify(orig, ui, repo, *pats, **opts): |
|
248 | 272 | large = opts.pop('large', False) |
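The overridematch wrapper above accepts a path when either the path itself or its largefile counterpart matches. A self-contained sketch of that matchfn trick, with shortname and the helpers redefined locally so it runs on its own (the real code gets them from lfutil):

    shortname = '.hglf'

    def standin(filename):
        # repo-relative, slash-separated standin path for a largefile
        return shortname + '/' + filename

    def splitstandin(filename):
        # inverse of standin(); None when filename is not a standin
        if filename.startswith(shortname + '/'):
            return filename[len(shortname) + 1:]
        return None

    def wrapmatchfn(origmatchfn):
        # accept f when f matches, or when f is a standin whose
        # largefile name matches -- the same idea as lfmatchfn above
        def lfmatchfn(f):
            lf = splitstandin(f)
            if lf is not None and origmatchfn(lf):
                return True
            return origmatchfn(f)
        return lfmatchfn

    m = wrapmatchfn(lambda f: f == 'big.bin')
    print m('big.bin'), m('.hglf/big.bin')  # True True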
@@ -254,6 +278,13 b' def overrideverify(orig, ui, repo, *pats' | |||
|
254 | 278 | result = result or lfcommands.verifylfiles(ui, repo, all, contents) |
|
255 | 279 | return result |
|
256 | 280 | |
|
281 | def overridedebugstate(orig, ui, repo, *pats, **opts): | |
|
282 | large = opts.pop('large', False) | |
|
283 | if large: | |
|
284 | lfcommands.debugdirstate(ui, repo) | |
|
285 | else: | |
|
286 | orig(ui, repo, *pats, **opts) | |
|
287 | ||
|
257 | 288 | # Override needs to refresh standins so that update's normal merge |
|
258 | 289 | # will go through properly. Then the other update hook (overriding repo.update) |
|
259 | 290 | # will get the new files. Filemerge is also overridden so that the merge |
@@ -746,7 +777,7 b' def hgclone(orig, ui, opts, *args, **kwa' | |||
|
746 | 777 | # .hg/largefiles, and the standin matcher won't match anything anyway.) |
|
747 | 778 | if 'largefiles' in repo.requirements: |
|
748 | 779 | if opts.get('noupdate'): |
|
749 | util.makedirs(repo.pathto(lfutil.shortname)) | 
|
780 | util.makedirs(repo.wjoin(lfutil.shortname)) | |
|
750 | 781 | util.makedirs(repo.join(lfutil.longname)) |
|
751 | 782 | |
|
752 | 783 | # Caching is implicitly limited to 'rev' option, since the dest repo was |
@@ -839,7 +870,7 b' def overridearchive(orig, repo, dest, no' | |||
|
839 | 870 | write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata) |
|
840 | 871 | |
|
841 | 872 | if subrepos: |
|
842 | for subpath in ctx.substate: | |
|
873 | for subpath in sorted(ctx.substate): | |
|
843 | 874 | sub = ctx.sub(subpath) |
|
844 | 875 | submatch = match_.narrowmatcher(subpath, matchfn) |
|
845 | 876 | sub.archive(repo.ui, archiver, prefix, submatch) |
@@ -886,7 +917,7 b' def hgsubrepoarchive(orig, repo, ui, arc' | |||
|
886 | 917 | |
|
887 | 918 | write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata) |
|
888 | 919 | |
|
889 | for subpath in ctx.substate: | |
|
920 | for subpath in sorted(ctx.substate): | |
|
890 | 921 | sub = ctx.sub(subpath) |
|
891 | 922 | submatch = match_.narrowmatcher(subpath, match) |
|
892 | 923 | sub.archive(ui, archiver, os.path.join(prefix, repo._path) + '/', |
@@ -949,8 +980,10 b' def overrideforget(orig, ui, repo, *pats' | |||
|
949 | 980 | else: |
|
950 | 981 | lfdirstate.remove(f) |
|
951 | 982 | lfdirstate.write() |
|
952 | lfutil.reporemove(repo, [lfutil.standin(f) for f in forget], | 
|
953 | unlink=True) | |
|
983 | standins = [lfutil.standin(f) for f in forget] | |
|
984 | for f in standins: | |
|
985 | util.unlinkpath(repo.wjoin(f), ignoremissing=True) | |
|
986 | repo[None].forget(standins) | |
|
954 | 987 | finally: |
|
955 | 988 | wlock.release() |
|
956 | 989 | |
@@ -967,10 +1000,10 b' def getoutgoinglfiles(ui, repo, dest=Non' | |||
|
967 | 1000 | remote = hg.peer(repo, opts, dest) |
|
968 | 1001 | except error.RepoError: |
|
969 | 1002 | return None |
|
970 | o = lfutil.findoutgoing(repo, remote, False) | 
|
971 | if not o: | |
|
972 | return o | |
|
973 | o = repo.changelog.nodesbetween(o, revs)[0] | |
|
1003 | outgoing = discovery.findcommonoutgoing(repo, remote.peer(), force=False) | |
|
1004 | if not outgoing.missing: | |
|
1005 | return outgoing.missing | |
|
1006 | o = repo.changelog.nodesbetween(outgoing.missing, revs)[0] | |
|
974 | 1007 | if opts.get('newest_first'): |
|
975 | 1008 | o.reverse() |
|
976 | 1009 | |
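For reference, a hedged sketch of the discovery API this hunk switches to; the repository paths are placeholders and a Mercurial of this vintage is assumed importable:

    from mercurial import discovery, hg, ui as uimod

    u = uimod.ui()
    repo = hg.repository(u, 'path/to/repo')        # placeholder
    remote = hg.peer(u, {}, 'path/to/other-repo')  # placeholder
    outgoing = discovery.findcommonoutgoing(repo, remote.peer(), force=False)
    # outgoing.missing lists node ids the remote lacks; [] when in sync
    print len(outgoing.missing)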
@@ -994,7 +1027,7 b' def getoutgoinglfiles(ui, repo, dest=Non' | |||
|
994 | 1027 | files.add(f) |
|
995 | 1028 | toupload = toupload.union( |
|
996 | 1029 | set([f for f in files if lfutil.isstandin(f) and f in ctx])) |
|
997 | return toupload | |
|
1030 | return sorted(toupload) | |
|
998 | 1031 | |
|
999 | 1032 | def overrideoutgoing(orig, ui, repo, dest=None, **opts): |
|
1000 | 1033 | result = orig(ui, repo, dest, **opts) |
@@ -1065,6 +1098,9 b' def scmutiladdremove(orig, repo, pats=[]' | |||
|
1065 | 1098 | # Calling purge with --all will cause the largefiles to be deleted. |
|
1066 | 1099 | # Override repo.status to prevent this from happening. |
|
1067 | 1100 | def overridepurge(orig, ui, repo, *dirs, **opts): |
|
1101 | # XXX large file status is buggy when used on repo proxy. | |
|
1102 | # XXX this needs to be investigated. | 
|
1103 | repo = repo.unfiltered() | |
|
1068 | 1104 | oldstatus = repo.status |
|
1069 | 1105 | def overridestatus(node1='.', node2=None, match=None, ignored=False, |
|
1070 | 1106 | clean=False, unknown=False, listsubrepos=False): |
@@ -140,19 +140,6 b' def wirereposetup(ui, repo):' | |||
|
140 | 140 | def capabilities(repo, proto): |
|
141 | 141 | return capabilitiesorig(repo, proto) + ' largefiles=serve' |
|
142 | 142 | |
|
143 | # duplicate what Mercurial's new out-of-band errors mechanism does, because | |
|
144 | # clients old and new alike both handle it well | |
|
145 | def webprotorefuseclient(self, message): | |
|
146 | self.req.header([('Content-Type', 'application/hg-error')]) | |
|
147 | return message | |
|
148 | ||
|
149 | def sshprotorefuseclient(self, message): | |
|
150 | self.ui.write_err('%s\n-\n' % message) | |
|
151 | self.fout.write('\n') | |
|
152 | self.fout.flush() | |
|
153 | ||
|
154 | return '' | |
|
155 | ||
|
156 | 143 | def heads(repo, proto): |
|
157 | 144 | if lfutil.islfilesrepo(repo): |
|
158 | 145 | return wireproto.ooberror(LARGEFILES_REQUIRED_MSG) |
@@ -11,9 +11,11 b' import copy' | |||
|
11 | 11 | import types |
|
12 | 12 | import os |
|
13 | 13 | |
|
14 | from mercurial import context, error, manifest, match as match_, util | |
|
14 | from mercurial import context, error, manifest, match as match_, util, \ | |
|
15 | discovery | |
|
15 | 16 | from mercurial import node as node_ |
|
16 | 17 | from mercurial.i18n import _ |
|
18 | from mercurial import localrepo | |
|
17 | 19 | |
|
18 | 20 | import lfcommands |
|
19 | 21 | import proto |
@@ -88,6 +90,9 b' def reposetup(ui, repo):' | |||
|
88 | 90 | # appropriate list in the result. Also removes standin files |
|
89 | 91 | # from the listing. Revert to the original status if |
|
90 | 92 | # self.lfstatus is False. |
|
93 | # XXX large file status is buggy when used on repo proxy. | |
|
94 | # XXX this needs to be investigated. | |
|
95 | @localrepo.unfilteredmethod | |
|
91 | 96 | def status(self, node1='.', node2=None, match=None, ignored=False, |
|
92 | 97 | clean=False, unknown=False, listsubrepos=False): |
|
93 | 98 | listignored, listclean, listunknown = ignored, clean, unknown |
@@ -153,78 +158,54 b' def reposetup(ui, repo):' | |||
|
153 | 158 | newfiles.append(f) |
|
154 | 159 | return newfiles |
|
155 | 160 | |
|
156 | # Create a function that we can use to override what is | |
|
157 | # normally the ignore matcher. We've already checked | |
|
158 | # for ignored files on the first dirstate walk, and | |
|
159 | # unnecessarily re-checking here causes a huge performance | |
|
160 | # hit because lfdirstate only knows about largefiles | |
|
161 | def _ignoreoverride(self): | |
|
162 | return False | |
|
163 | ||
|
164 | 161 | m = copy.copy(match) |
|
165 | 162 | m._files = tostandins(m._files) |
|
166 | 163 | |
|
167 | 164 | result = super(lfilesrepo, self).status(node1, node2, m, |
|
168 | 165 | ignored, clean, unknown, listsubrepos) |
|
169 | 166 | if working: |
|
170 | try: | |
|
171 | # Any non-largefiles that were explicitly listed must be | |
|
172 | # taken out or lfdirstate.status will report an error. | |
|
173 | # The status of these files was already computed using | 
|
174 | # super's status. | |
|
175 | # Override lfdirstate's ignore matcher to not do | |
|
176 | # anything | |
|
177 | origignore = lfdirstate._ignore | |
|
178 | lfdirstate._ignore = _ignoreoverride | |
|
167 | ||
|
168 | def sfindirstate(f): | |
|
169 | sf = lfutil.standin(f) | |
|
170 | dirstate = self.dirstate | |
|
171 | return sf in dirstate or sf in dirstate.dirs() | |
|
179 | 172 | |
|
180 | def sfindirstate(f): | |
|
181 | sf = lfutil.standin(f) | 
|
182 | dirstate = self.dirstate | |
|
183 | return sf in dirstate or sf in dirstate.dirs() | |
|
184 | match._files = [f for f in match._files | |
|
185 | if sfindirstate(f)] | 
|
186 | # Don't waste time getting the ignored and unknown | |
|
187 | # files again; we already have them | |
|
188 | s = lfdirstate.status(match, [], False, | |
|
189 | listclean, False) | 
|
190 | (unsure, modified, added, removed, missing, unknown, | |
|
191 | ignored, clean) = s | 
|
192 | # Replace the list of ignored and unknown files with | |
|
193 | # the previously calculated lists, and strip out the | |
|
194 | # largefiles | |
|
195 | lfiles = set(lfdirstate._map) | |
|
196 | ignored = set(result[5]).difference(lfiles) | 
|
197 | unknown = set(result[4]).difference(lfiles) | |
|
198 | if parentworking: | 
|
199 | for lfile in unsure: | 
|
200 | standin = lfutil.standin(lfile) | |
|
201 | if standin not in ctx1: | |
|
202 | # from second parent | |
|
203 | modified.append(lfile) | |
|
204 | elif ctx1[standin].data().strip() \ | |
|
205 | != lfutil.hashfile(self.wjoin(lfile)): | 
|
173 | match._files = [f for f in match._files | |
|
174 | if sfindirstate(f)] | |
|
175 | # Don't waste time getting the ignored and unknown | |
|
176 | # files from lfdirstate | |
|
177 | s = lfdirstate.status(match, [], False, | |
|
178 | listclean, False) | |
|
179 | (unsure, modified, added, removed, missing, _unknown, | |
|
180 | _ignored, clean) = s | |
|
181 | if parentworking: | |
|
182 | for lfile in unsure: | |
|
183 | standin = lfutil.standin(lfile) | |
|
184 | if standin not in ctx1: | |
|
185 | # from second parent | |
|
186 | modified.append(lfile) | |
|
187 | elif ctx1[standin].data().strip() \ | |
|
188 | != lfutil.hashfile(self.wjoin(lfile)): | |
|
189 | modified.append(lfile) | |
|
190 | else: | |
|
191 | clean.append(lfile) | |
|
192 | lfdirstate.normal(lfile) | |
|
193 | else: | |
|
194 | tocheck = unsure + modified + added + clean | |
|
195 | modified, added, clean = [], [], [] | |
|
196 | ||
|
197 | for lfile in tocheck: | |
|
198 | standin = lfutil.standin(lfile) | |
|
199 | if inctx(standin, ctx1): | |
|
200 | if ctx1[standin].data().strip() != \ | |
|
201 | lfutil.hashfile(self.wjoin(lfile)): | |
|
206 | 202 | modified.append(lfile) |
|
207 | 203 | else: |
|
208 | 204 | clean.append(lfile) |
|
209 | lfdirstate.normal(lfile) | 
|
210 | else: | 
|
211 | tocheck = unsure + modified + added + clean | |
|
212 | modified, added, clean = [], [], [] | |
|
205 | else: | |
|
206 | added.append(lfile) | |
|
213 | 207 | |
|
214 | for lfile in tocheck: | |
|
215 | standin = lfutil.standin(lfile) | |
|
216 | if inctx(standin, ctx1): | |
|
217 | if ctx1[standin].data().strip() != \ | |
|
218 | lfutil.hashfile(self.wjoin(lfile)): | |
|
219 | modified.append(lfile) | |
|
220 | else: | |
|
221 | clean.append(lfile) | |
|
222 | else: | |
|
223 | added.append(lfile) | |
|
224 | finally: | |
|
225 | # Replace the original ignore function | |
|
226 | lfdirstate._ignore = origignore | |
|
227 | ||
|
208 | # Standins no longer found in lfdirstate have been removed | 
|
228 | 209 | for standin in ctx1.manifest(): |
|
229 | 210 | if not lfutil.isstandin(standin): |
|
230 | 211 | continue |
@@ -239,20 +220,17 b' def reposetup(ui, repo):' | |||
|
239 | 220 | |
|
240 | 221 | # Largefiles are not really removed when they're |
|
241 | 222 | # still in the normal dirstate. Likewise, normal |
|
242 | # files are not really removed if it's still in | 
|
223 | # files are not really removed if they are still in | |
|
243 | 224 | # lfdirstate. This happens in merges where files |
|
244 | 225 | # change type. |
|
245 | 226 | removed = [f for f in removed if f not in self.dirstate] |
|
246 | 227 | result[2] = [f for f in result[2] if f not in lfdirstate] |
|
247 | 228 | |
|
229 | lfiles = set(lfdirstate._map) | |
|
248 | 230 | # Unknown files |
|
249 | unknown = set(unknown).difference(ignored) | 
|
250 | result[4] = [f for f in unknown | |
|
251 | if (self.dirstate[f] == '?' and | |
|
252 | not lfutil.isstandin(f))] | |
|
253 | # Ignored files were calculated earlier by the dirstate, | |
|
254 | # and we already stripped out the largefiles from the list | |
|
255 | result[5] = ignored | |
|
231 | result[4] = set(result[4]).difference(lfiles) | |
|
232 | # Ignored files | |
|
233 | result[5] = set(result[5]).difference(lfiles) | |
|
256 | 234 | # combine normal files and largefiles |
|
257 | 235 | normals = [[fn for fn in filelist |
|
258 | 236 | if not lfutil.isstandin(fn)] |
@@ -361,7 +339,7 b' def reposetup(ui, repo):' | |||
|
361 | 339 | # Case 2: user calls commit with specified patterns: refresh |
|
362 | 340 | # any matching big files. |
|
363 | 341 | smatcher = lfutil.composestandinmatcher(self, match) |
|
364 | standins = lfutil.dirstatewalk(self.dirstate, smatcher) | 
|
342 | standins = self.dirstate.walk(smatcher, [], False, False) | |
|
365 | 343 | |
|
366 | 344 | # No matching big files: get out of the way and pass control to |
|
367 | 345 | # the usual commit() method. |
@@ -377,7 +355,7 b' def reposetup(ui, repo):' | |||
|
377 | 355 | lfdirstate = lfutil.openlfdirstate(ui, self) |
|
378 | 356 | for standin in standins: |
|
379 | 357 | lfile = lfutil.splitstandin(standin) |
|
380 | if lfdirstate[lfile] | 
|
358 | if lfdirstate[lfile] != 'r': | |
|
381 | 359 | lfutil.updatestandin(self, standin) |
|
382 | 360 | lfdirstate.normal(lfile) |
|
383 | 361 | else: |
@@ -427,10 +405,11 b' def reposetup(ui, repo):' | |||
|
427 | 405 | wlock.release() |
|
428 | 406 | |
|
429 | 407 | def push(self, remote, force=False, revs=None, newbranch=False): |
|
430 | o = lfutil.findoutgoing(repo, remote, force) | 
|
431 | if o: | |
|
408 | outgoing = discovery.findcommonoutgoing(repo, remote.peer(), | |
|
409 | force=force) | |
|
410 | if outgoing.missing: | |
|
432 | 411 | toupload = set() |
|
433 | o = self.changelog.nodesbetween(o, revs)[0] | |
|
412 | o = self.changelog.nodesbetween(outgoing.missing, revs)[0] | |
|
434 | 413 | for n in o: |
|
435 | 414 | parents = [p for p in self.changelog.parents(n) |
|
436 | 415 | if p != node_.nullid] |
@@ -9,9 +9,9 b'' | |||
|
9 | 9 | '''setup for largefiles extension: uisetup''' |
|
10 | 10 | |
|
11 | 11 | from mercurial import archival, cmdutil, commands, extensions, filemerge, hg, \ |
|
12 | httppeer, localrepo, merge, scmutil, sshpeer, sshserver, wireproto | 
|
12 | httppeer, localrepo, merge, scmutil, sshpeer, wireproto | |
|
13 | 13 | from mercurial.i18n import _ |
|
14 | from mercurial.hgweb import hgweb_mod, protocol, webcommands | 
|
14 | from mercurial.hgweb import hgweb_mod, webcommands | |
|
15 | 15 | from mercurial.subrepo import hgsubrepo |
|
16 | 16 | |
|
17 | 17 | import overrides |
@@ -59,6 +59,11 b' def uisetup(ui):' | |||
|
59 | 59 | _('verify largefile contents not just existence'))] |
|
60 | 60 | entry[1].extend(verifyopt) |
|
61 | 61 | |
|
62 | entry = extensions.wrapcommand(commands.table, 'debugstate', | |
|
63 | overrides.overridedebugstate) | |
|
64 | debugstateopt = [('', 'large', None, _('display largefiles dirstate'))] | |
|
65 | entry[1].extend(debugstateopt) | |
|
66 | ||
|
62 | 67 | entry = extensions.wrapcommand(commands.table, 'outgoing', |
|
63 | 68 | overrides.overrideoutgoing) |
|
64 | 69 | outgoingopt = [('', 'large', None, _('display outgoing largefiles'))] |
@@ -139,11 +144,6 b' def uisetup(ui):' | |||
|
139 | 144 | proto.capabilitiesorig = wireproto.capabilities |
|
140 | 145 | wireproto.capabilities = proto.capabilities |
|
141 | 146 | |
|
142 | # these let us reject non-largefiles clients and make them display | |
|
143 | # our error messages | |
|
144 | protocol.webproto.refuseclient = proto.webprotorefuseclient | |
|
145 | sshserver.sshserver.refuseclient = proto.sshprotorefuseclient | |
|
146 | ||
|
147 | 147 | # can't do this in reposetup because it needs to have happened before |
|
148 | 148 | # wirerepo.__init__ is called |
|
149 | 149 | proto.ssholdcallstream = sshpeer.sshpeer._callstream |
@@ -63,7 +63,7 b' from mercurial.i18n import _' | |||
|
63 | 63 | from mercurial.node import bin, hex, short, nullid, nullrev |
|
64 | 64 | from mercurial.lock import release |
|
65 | 65 | from mercurial import commands, cmdutil, hg, scmutil, util, revset |
|
66 | from mercurial import repair, extensions, error, phases, bookmarks | 
|
66 | from mercurial import repair, extensions, error, phases | |
|
67 | 67 | from mercurial import patch as patchmod |
|
68 | 68 | import os, re, errno, shutil |
|
69 | 69 | |
@@ -275,6 +275,7 b' def newcommit(repo, phase, *args, **kwar' | |||
|
275 | 275 | It should be used instead of repo.commit inside the mq source for operation |
|
276 | 276 | creating new changeset. |
|
277 | 277 | """ |
|
278 | repo = repo.unfiltered() | |
|
278 | 279 | if phase is None: |
|
279 | 280 | if repo.ui.configbool('mq', 'secret', False): |
|
280 | 281 | phase = phases.secret |
@@ -826,7 +827,11 b' class queue(object):' | |||
|
826 | 827 | if r: |
|
827 | 828 | r[None].forget(patches) |
|
828 | 829 | for p in patches: |
|
829 | os.unlink(self.join(p)) | |
|
830 | try: | |
|
831 | os.unlink(self.join(p)) | |
|
832 | except OSError, inst: | |
|
833 | if inst.errno != errno.ENOENT: | |
|
834 | raise | |
|
830 | 835 | |
|
831 | 836 | qfinished = [] |
|
832 | 837 | if numrevs: |
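The try/unlink/ignore-ENOENT idiom introduced here has the same contract as the util.unlinkpath(path, ignoremissing=True) calls used elsewhere in this series. A standalone sketch:

    import errno, os

    def unlink_ignoremissing(path):
        # an already-missing file is fine; other OSErrors still propagate
        try:
            os.unlink(path)
        except OSError, inst:
            if inst.errno != errno.ENOENT:
                raise

    unlink_ignoremissing('no-such-file')  # silently does nothing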
@@ -924,11 +929,11 b' class queue(object):' | |||
|
924 | 929 | self._cleanup(realpatches, numrevs, opts.get('keep')) |
|
925 | 930 | |
|
926 | 931 | def checktoppatch(self, repo): |
|
932 | '''check that working directory is at qtip''' | |
|
927 | 933 | if self.applied: |
|
928 | 934 | top = self.applied[-1].node |
|
929 | 935 | patch = self.applied[-1].name |
|
930 | pp = repo.dirstate.parents() | 
|
931 | if top not in pp: | |
|
936 | if repo.dirstate.p1() != top: | |
|
932 | 937 | raise util.Abort(_("working directory revision is not qtip")) |
|
933 | 938 | return top, patch |
|
934 | 939 | return None, None |
@@ -942,7 +947,7 b' class queue(object):' | |||
|
942 | 947 | bctx = repo[baserev] |
|
943 | 948 | else: |
|
944 | 949 | bctx = wctx.parents()[0] |
|
945 | for s in wctx.substate: | |
|
950 | for s in sorted(wctx.substate): | |
|
946 | 951 | if wctx.sub(s).dirty(True): |
|
947 | 952 | raise util.Abort( |
|
948 | 953 | _("uncommitted changes in subrepository %s") % s) |
@@ -1146,7 +1151,7 b' class queue(object):' | |||
|
1146 | 1151 | return matches[0] |
|
1147 | 1152 | if self.series and self.applied: |
|
1148 | 1153 | if s == 'qtip': |
|
1149 | return self.series[self.seriesend(True)-1] | |
|
1154 | return self.series[self.seriesend(True) - 1] | |
|
1150 | 1155 | if s == 'qbase': |
|
1151 | 1156 | return self.series[0] |
|
1152 | 1157 | return None |
@@ -1324,11 +1329,7 b' class queue(object):' | |||
|
1324 | 1329 | # created while patching |
|
1325 | 1330 | for f in all_files: |
|
1326 | 1331 | if f not in repo.dirstate: |
|
1327 | try: | |
|
1328 | util.unlinkpath(repo.wjoin(f)) | |
|
1329 | except OSError, inst: | |
|
1330 | if inst.errno != errno.ENOENT: | |
|
1331 | raise | |
|
1332 | util.unlinkpath(repo.wjoin(f), ignoremissing=True) | |
|
1332 | 1333 | self.ui.warn(_('done\n')) |
|
1333 | 1334 | raise |
|
1334 | 1335 | |
@@ -1405,8 +1406,6 b' class queue(object):' | |||
|
1405 | 1406 | self.applieddirty = True |
|
1406 | 1407 | end = len(self.applied) |
|
1407 | 1408 | rev = self.applied[start].node |
|
1408 | if update: | |
|
1409 | top = self.checktoppatch(repo)[0] | |
|
1410 | 1409 | |
|
1411 | 1410 | try: |
|
1412 | 1411 | heads = repo.changelog.heads(rev) |
@@ -1427,7 +1426,7 b' class queue(object):' | |||
|
1427 | 1426 | if update: |
|
1428 | 1427 | qp = self.qparents(repo, rev) |
|
1429 | 1428 | ctx = repo[qp] |
|
1430 | m, a, r, d = repo.status(qp, top)[:4] | 
|
1429 | m, a, r, d = repo.status(qp, '.')[:4] | |
|
1431 | 1430 | if d: |
|
1432 | 1431 | raise util.Abort(_("deletions found between repo revs")) |
|
1433 | 1432 | |
@@ -1437,11 +1436,7 b' class queue(object):' | |||
|
1437 | 1436 | self.backup(repo, tobackup) |
|
1438 | 1437 | |
|
1439 | 1438 | for f in a: |
|
1440 | try: | |
|
1441 | util.unlinkpath(repo.wjoin(f)) | |
|
1442 | except OSError, e: | |
|
1443 | if e.errno != errno.ENOENT: | |
|
1444 | raise | |
|
1439 | util.unlinkpath(repo.wjoin(f), ignoremissing=True) | |
|
1445 | 1440 | repo.dirstate.drop(f) |
|
1446 | 1441 | for f in m + r: |
|
1447 | 1442 | fctx = ctx[f] |
@@ -1625,7 +1620,7 b' class queue(object):' | |||
|
1625 | 1620 | # if the patch excludes a modified file, mark that |
|
1626 | 1621 | # file with mtime=0 so status can see it. |
|
1627 | 1622 | mm = [] |
|
1628 | for i in xrange(len(m)-1, -1, -1): | |
|
1623 | for i in xrange(len(m) - 1, -1, -1): | |
|
1629 | 1624 | if not matchfn(m[i]): |
|
1630 | 1625 | mm.append(m[i]) |
|
1631 | 1626 | del m[i] |
@@ -1675,9 +1670,10 b' class queue(object):' | |||
|
1675 | 1670 | patchf.write(chunk) |
|
1676 | 1671 | patchf.close() |
|
1677 | 1672 | |
|
1673 | marks = repo._bookmarks | |
|
1678 | 1674 | for bm in bmlist: |
|
1679 | repo._bookmarks[bm] = n | 
|
1680 | bookmarks.write(repo) | 
|
1675 | marks[bm] = n | |
|
1676 | marks.write() | |
|
1681 | 1677 | |
|
1682 | 1678 | self.applied.append(statusentry(n, patchfn)) |
|
1683 | 1679 | except: # re-raises |
@@ -2999,7 +2995,7 b' def strip(ui, repo, *revs, **opts):' | |||
|
2999 | 2995 | revs.update(set(rsrevs)) |
|
3000 | 2996 | if not revs: |
|
3001 | 2997 | del marks[mark] |
|
3002 | bookmarks.write(repo) | 
|
2998 | marks.write() | |
|
3003 | 2999 | ui.write(_("bookmark '%s' deleted\n") % mark) |
|
3004 | 3000 | |
|
3005 | 3001 | if not revs: |
@@ -3036,7 +3032,7 b' def strip(ui, repo, *revs, **opts):' | |||
|
3036 | 3032 | del q.applied[start:end] |
|
3037 | 3033 | q.savedirty() |
|
3038 | 3034 | |
|
3039 | revs = list(rootnodes) | 
|
3035 | revs = sorted(rootnodes) | |
|
3040 | 3036 | if update and opts.get('keep'): |
|
3041 | 3037 | wlock = repo.wlock() |
|
3042 | 3038 | try: |
@@ -3049,7 +3045,7 b' def strip(ui, repo, *revs, **opts):' | |||
|
3049 | 3045 | |
|
3050 | 3046 | if opts.get('bookmark'): |
|
3051 | 3047 | del marks[mark] |
|
3052 | bookmarks.write(repo) | 
|
3048 | marks.write() | |
|
3053 | 3049 | ui.write(_("bookmark '%s' deleted\n") % mark) |
|
3054 | 3050 | |
|
3055 | 3051 | repo.mq.strip(repo, revs, backup=backup, update=update, |
@@ -3435,7 +3431,7 b' def reposetup(ui, repo):' | |||
|
3435 | 3431 | outapplied.pop() |
|
3436 | 3432 | # looking for pushed and shared changeset |
|
3437 | 3433 | for node in outapplied: |
|
3438 | if repo[node].phase() < phases.secret: | 
|
3434 | if self[node].phase() < phases.secret: | |
|
3439 | 3435 | raise util.Abort(_('source has mq patches applied')) |
|
3440 | 3436 | # no non-secret patches pushed |
|
3441 | 3437 | super(mqrepo, self).checkpush(force, revs) |
@@ -3451,7 +3447,8 b' def reposetup(ui, repo):' | |||
|
3451 | 3447 | mqtags = [(patch.node, patch.name) for patch in q.applied] |
|
3452 | 3448 | |
|
3453 | 3449 | try: |
|
3454 | self.changelog.rev(mqtags[-1][0]) | |
|
3450 | # for now ignore filtering business | |
|
3451 | self.unfiltered().changelog.rev(mqtags[-1][0]) | |
|
3455 | 3452 | except error.LookupError: |
|
3456 | 3453 | self.ui.warn(_('mq status file refers to unknown node %s\n') |
|
3457 | 3454 | % short(mqtags[-1][0])) |
@@ -3470,41 +3467,6 b' def reposetup(ui, repo):' | |||
|
3470 | 3467 | |
|
3471 | 3468 | return result |
|
3472 | 3469 | |
|
3473 | def _branchtags(self, partial, lrev): | |
|
3474 | q = self.mq | |
|
3475 | cl = self.changelog | |
|
3476 | qbase = None | |
|
3477 | if not q.applied: | |
|
3478 | if getattr(self, '_committingpatch', False): | |
|
3479 | # Committing a new patch, must be tip | |
|
3480 | qbase = len(cl) - 1 | |
|
3481 | else: | |
|
3482 | qbasenode = q.applied[0].node | |
|
3483 | try: | |
|
3484 | qbase = cl.rev(qbasenode) | |
|
3485 | except error.LookupError: | |
|
3486 | self.ui.warn(_('mq status file refers to unknown node %s\n') | |
|
3487 | % short(qbasenode)) | |
|
3488 | if qbase is None: | |
|
3489 | return super(mqrepo, self)._branchtags(partial, lrev) | |
|
3490 | ||
|
3491 | start = lrev + 1 | |
|
3492 | if start < qbase: | |
|
3493 | # update the cache (excluding the patches) and save it | |
|
3494 | ctxgen = (self[r] for r in xrange(lrev + 1, qbase)) | |
|
3495 | self._updatebranchcache(partial, ctxgen) | |
|
3496 | self._writebranchcache(partial, cl.node(qbase - 1), qbase - 1) | |
|
3497 | start = qbase | |
|
3498 | # if start = qbase, the cache is as updated as it should be. | |
|
3499 | # if start > qbase, the cache includes (part of) the patches. | |
|
3500 | # we might as well use it, but we won't save it. | |
|
3501 | ||
|
3502 | # update the cache up to the tip | |
|
3503 | ctxgen = (self[r] for r in xrange(start, len(cl))) | |
|
3504 | self._updatebranchcache(partial, ctxgen) | |
|
3505 | ||
|
3506 | return partial | |
|
3507 | ||
|
3508 | 3470 | if repo.local(): |
|
3509 | 3471 | repo.__class__ = mqrepo |
|
3510 | 3472 |
@@ -474,11 +474,11 b' def patchbomb(ui, repo, *revs, **opts):' | |||
|
474 | 474 | |
|
475 | 475 | if opts.get('diffstat') or opts.get('confirm'): |
|
476 | 476 | ui.write(_('\nFinal summary:\n\n')) |
|
477 | ui.write('From: %s\n' % sender) | |
|
477 | ui.write(('From: %s\n' % sender)) | |
|
478 | 478 | for addr in showaddrs: |
|
479 | 479 | ui.write('%s\n' % addr) |
|
480 | 480 | for m, subj, ds in msgs: |
|
481 | ui.write('Subject: %s\n' % subj) | |
|
481 | ui.write(('Subject: %s\n' % subj)) | |
|
482 | 482 | if ds: |
|
483 | 483 | ui.write(ds) |
|
484 | 484 | ui.write('\n') |
@@ -23,6 +23,7 b' from mercurial.i18n import _' | |||
|
23 | 23 | import os, errno |
|
24 | 24 | |
|
25 | 25 | nullmerge = -2 |
|
26 | revignored = -3 | |
|
26 | 27 | |
|
27 | 28 | cmdtable = {} |
|
28 | 29 | command = cmdutil.command(cmdtable) |
@@ -184,8 +185,6 b' def rebase(ui, repo, **opts):' | |||
|
184 | 185 | rebaseset = repo.revs( |
|
185 | 186 | '(children(ancestor(%ld, %d)) and ::(%ld))::', |
|
186 | 187 | base, dest, base) |
|
187 | # temporary top level filtering of extinct revisions | |
|
188 | rebaseset = repo.revs('%ld - hidden()', rebaseset) | |
|
189 | 188 | if rebaseset: |
|
190 | 189 | root = min(rebaseset) |
|
191 | 190 | else: |
@@ -194,8 +193,9 b' def rebase(ui, repo, **opts):' | |||
|
194 | 193 | if not rebaseset: |
|
195 | 194 | repo.ui.debug('base is ancestor of destination\n') |
|
196 | 195 | result = None |
|
197 | elif not keepf and repo.revs('first(children(%ld) - %ld)-hidden()', | |
|
198 | rebaseset, rebaseset): | |
|
196 | elif (not (keepf or obsolete._enabled) | |
|
197 | and repo.revs('first(children(%ld) - %ld)', | |
|
198 | rebaseset, rebaseset)): | |
|
199 | 199 | raise util.Abort( |
|
200 | 200 | _("can't remove original changesets with" |
|
201 | 201 | " unrebased descendants"), |
@@ -214,8 +214,8 b' def rebase(ui, repo, **opts):' | |||
|
214 | 214 | else: |
|
215 | 215 | originalwd, target, state = result |
|
216 | 216 | if collapsef: |
|
217 | targetancestors = set(repo.changelog.ancestors([target])) | 
|
218 | targetancestors.add(target) | |
|
217 | targetancestors = repo.changelog.ancestors([target], | |
|
218 | inclusive=True) | |
|
219 | 219 | external = checkexternal(repo, state, targetancestors) |
|
220 | 220 | |
|
221 | 221 | if keepbranchesf: |
@@ -233,8 +233,7 b' def rebase(ui, repo, **opts):' | |||
|
233 | 233 | |
|
234 | 234 | # Rebase |
|
235 | 235 | if not targetancestors: |
|
236 | targetancestors = set(repo.changelog.ancestors([target])) | 
|
237 | targetancestors.add(target) | |
|
236 | targetancestors = repo.changelog.ancestors([target], inclusive=True) | |
|
238 | 237 | |
|
239 | 238 | # Keep track of the current bookmarks in order to reset them later |
|
240 | 239 | currentbookmarks = repo._bookmarks.copy() |
@@ -294,7 +293,7 b' def rebase(ui, repo, **opts):' | |||
|
294 | 293 | else: |
|
295 | 294 | commitmsg = 'Collapsed revision' |
|
296 | 295 | for rebased in state: |
|
297 | if rebased not in skipped and state[rebased] != nullmerge: | 
|
296 | if rebased not in skipped and state[rebased] > nullmerge: | |
|
298 | 297 | commitmsg += '\n* %s' % repo[rebased].description() |
|
299 | 298 | commitmsg = ui.edit(commitmsg, repo.ui.username()) |
|
300 | 299 | newrev = concludenode(repo, rev, p1, external, commitmsg=commitmsg, |
@@ -307,22 +306,21 b' def rebase(ui, repo, **opts):' | |||
|
307 | 306 | # Nodeids are needed to reset bookmarks |
|
308 | 307 | nstate = {} |
|
309 | 308 | for k, v in state.iteritems(): |
|
310 | if v != nullmerge: | 
|
309 | if v > nullmerge: | |
|
311 | 310 | nstate[repo[k].node()] = repo[v].node() |
|
312 | 311 | |
|
313 | 312 | if not keepf: |
|
314 | 313 | collapsedas = None |
|
315 | 314 | if collapsef: |
|
316 | 315 | collapsedas = newrev |
|
317 | clearrebased(ui, repo, state, collapsedas) | |
|
316 | clearrebased(ui, repo, state, skipped, collapsedas) | |
|
318 | 317 | |
|
319 | 318 | if currentbookmarks: |
|
320 | 319 | updatebookmarks(repo, nstate, currentbookmarks, **opts) |
|
321 | 320 | |
|
322 | 321 | clearstatus(repo) |
|
323 | 322 | ui.note(_("rebase completed\n")) |
|
324 | if os.path.exists(repo.sjoin('undo')): | 
|
325 | util.unlinkpath(repo.sjoin('undo')) | |
|
323 | util.unlinkpath(repo.sjoin('undo'), ignoremissing=True) | |
|
326 | 324 | if skipped: |
|
327 | 325 | ui.note(_("%d revisions have been skipped\n") % len(skipped)) |
|
328 | 326 | |
@@ -395,6 +393,15 b' def rebasenode(repo, rev, p1, state, col' | |||
|
395 | 393 | # have to allow merging with it. |
|
396 | 394 | return merge.update(repo, rev, True, True, False, base, collapse) |
|
397 | 395 | |
|
396 | def nearestrebased(repo, rev, state): | |
|
397 | """return the nearest ancestor of rev in the rebase result""" | 
|
398 | rebased = [r for r in state if state[r] > nullmerge] | |
|
399 | candidates = repo.revs('max(%ld and (::%d))', rebased, rev) | |
|
400 | if candidates: | |
|
401 | return state[candidates[0]] | |
|
402 | else: | |
|
403 | return None | |
|
404 | ||
|
398 | 405 | def defineparents(repo, rev, target, state, targetancestors): |
|
399 | 406 | 'Return the new parent relationship of the revision that will be rebased' |
|
400 | 407 | parents = repo[rev].parents() |
@@ -406,6 +413,10 b' def defineparents(repo, rev, target, sta' | |||
|
406 | 413 | elif P1n in state: |
|
407 | 414 | if state[P1n] == nullmerge: |
|
408 | 415 | p1 = target |
|
416 | elif state[P1n] == revignored: | |
|
417 | p1 = nearestrebased(repo, P1n, state) | |
|
418 | if p1 is None: | |
|
419 | p1 = target | |
|
409 | 420 | else: |
|
410 | 421 | p1 = state[P1n] |
|
411 | 422 | else: # P1n external |
@@ -418,6 +429,11 b' def defineparents(repo, rev, target, sta' | |||
|
418 | 429 | if P2n in state: |
|
419 | 430 | if p1 == target: # P1n in targetancestors or external |
|
420 | 431 | p1 = state[P2n] |
|
432 | elif state[P2n] == revignored: | |
|
433 | p2 = nearestrebased(repo, P2n, state) | |
|
434 | if p2 is None: | |
|
435 | # no ancestors rebased yet, detach | |
|
436 | p2 = target | |
|
421 | 437 | else: |
|
422 | 438 | p2 = state[P2n] |
|
423 | 439 | else: # P2n external |
@@ -479,13 +495,14 b' def updatemq(repo, state, skipped, **opt' | |||
|
479 | 495 | |
|
480 | 496 | def updatebookmarks(repo, nstate, originalbookmarks, **opts): |
|
481 | 497 | 'Move bookmarks to their correct changesets' |
|
498 | marks = repo._bookmarks | |
|
482 | 499 | for k, v in originalbookmarks.iteritems(): |
|
483 | 500 | if v in nstate: |
|
484 | if nstate[v] != nullmerge: | 
|
501 | if nstate[v] > nullmerge: | |
|
485 | 502 | # update the bookmarks for revs that have moved |
|
486 | repo._bookmarks[k] = nstate[v] | 
|
503 | marks[k] = nstate[v] | |
|
487 | 504 | |
|
488 | bookmarks.write(repo) | 
|
505 | marks.write() | |
|
489 | 506 | |
|
490 | 507 | def storestatus(repo, originalwd, target, state, collapse, keep, keepbranches, |
|
491 | 508 | external): |
@@ -499,7 +516,7 b' def storestatus(repo, originalwd, target' | |||
|
499 | 516 | f.write('%d\n' % int(keepbranches)) |
|
500 | 517 | for d, v in state.iteritems(): |
|
501 | 518 | oldrev = repo[d].hex() |
|
502 | if v != nullmerge: | 
|
519 | if v > nullmerge: | |
|
503 | 520 | newrev = repo[v].hex() |
|
504 | 521 | else: |
|
505 | 522 | newrev = v |
@@ -509,8 +526,7 b' def storestatus(repo, originalwd, target' | |||
|
509 | 526 | |
|
510 | 527 | def clearstatus(repo): |
|
511 | 528 | 'Remove the status files' |
|
512 | if os.path.exists(repo.join("rebasestate")): | 
|
513 | util.unlinkpath(repo.join("rebasestate")) | |
|
529 | util.unlinkpath(repo.join("rebasestate"), ignoremissing=True) | |
|
514 | 530 | |
|
515 | 531 | def restorestatus(repo): |
|
516 | 532 | 'Restore a previously stored status' |
@@ -535,10 +551,10 b' def restorestatus(repo):' | |||
|
535 | 551 | keepbranches = bool(int(l)) |
|
536 | 552 | else: |
|
537 | 553 | oldrev, newrev = l.split(':') |
|
538 | if newrev != str(nullmerge): | 
|
554 | if newrev in (str(nullmerge), str(revignored)): | |
|
555 | state[repo[oldrev].rev()] = int(newrev) | |
|
556 | else: | |
|
539 | 557 | state[repo[oldrev].rev()] = repo[newrev].rev() |
|
540 | else: | |
|
541 | state[repo[oldrev].rev()] = int(newrev) | |
|
542 | 558 | skipped = set() |
|
543 | 559 | # recompute the set of skipped revs |
|
544 | 560 | if not collapse: |
@@ -577,9 +593,9 b' def abort(repo, originalwd, target, stat' | |||
|
577 | 593 | merge.update(repo, repo[originalwd].rev(), False, True, False) |
|
578 | 594 | rebased = filter(lambda x: x > -1 and x != target, state.values()) |
|
579 | 595 | if rebased: |
|
580 | strippoint = min(rebased) | 
|
596 | strippoints = [c.node() for c in repo.set('roots(%ld)', rebased)] | |
|
581 | 597 | # no backup of rebased cset versions needed |
|
582 | repair.strip(repo.ui, repo, repo[strippoint].node()) | 
|
598 | repair.strip(repo.ui, repo, strippoints) | |
|
583 | 599 | clearstatus(repo) |
|
584 | 600 | repo.ui.warn(_('rebase aborted\n')) |
|
585 | 601 | return 0 |
@@ -602,65 +618,77 b' def buildstate(repo, dest, rebaseset, co' | |||
|
602 | 618 | roots = list(repo.set('roots(%ld)', rebaseset)) |
|
603 | 619 | if not roots: |
|
604 | 620 | raise util.Abort(_('no matching revisions')) |
|
605 | if len(roots) > 1: | |
|
606 | raise util.Abort(_("can't rebase multiple roots")) | |
|
607 | root = roots[0] | |
|
608 | ||
|
609 | commonbase = root.ancestor(dest) | |
|
610 | if commonbase == root: | |
|
611 | raise util.Abort(_('source is ancestor of destination')) | |
|
612 | if commonbase == dest: | |
|
613 | samebranch = root.branch() == dest.branch() | |
|
614 | if not collapse and samebranch and root in dest.children(): | |
|
615 | repo.ui.debug('source is a child of destination\n') | |
|
616 | return None | |
|
621 | roots.sort() | |
|
622 | state = {} | |
|
623 | detachset = set() | |
|
624 | for root in roots: | |
|
625 | commonbase = root.ancestor(dest) | |
|
626 | if commonbase == root: | |
|
627 | raise util.Abort(_('source is ancestor of destination')) | |
|
628 | if commonbase == dest: | |
|
629 | samebranch = root.branch() == dest.branch() | |
|
630 | if not collapse and samebranch and root in dest.children(): | |
|
631 | repo.ui.debug('source is a child of destination\n') | |
|
632 | return None | |
|
617 | 633 | |
|
618 | repo.ui.debug('rebase onto %d starting from %d\n' % (dest, root)) | 
|
619 | state = dict.fromkeys(rebaseset, nullrev) | 
|
620 | # Rebase tries to turn <dest> into a parent of <root> while | |
|
621 | # preserving the number of parents of rebased changesets: | |
|
622 | # | |
|
623 | # - A changeset with a single parent will always be rebased as a | |
|
624 | # changeset with a single parent. | |
|
625 | # | |
|
626 | # - A merge will be rebased as merge unless its parents are both | |
|
627 | # ancestors of <dest> or are themselves in the rebased set and | |
|
628 | # pruned while rebased. | |
|
629 | # | |
|
630 | # If one parent of <root> is an ancestor of <dest>, the rebased | |
|
631 | # version of this parent will be <dest>. This is always true with | |
|
632 | # --base option. | |
|
633 | # | |
|
634 | # Otherwise, we need to *replace* the original parents with | |
|
635 | # <dest>. This "detaches" the rebased set from its former location | |
|
636 | # and rebases it onto <dest>. Changes introduced by ancestors of | |
|
637 | # <root> not common with <dest> (the detachset, marked as | |
|
638 | # nullmerge) are "removed" from the rebased changesets. | |
|
639 | # | |
|
640 | # - If <root> has a single parent, set it to <dest>. | |
|
641 | # | |
|
642 | # - If <root> is a merge, we cannot decide which parent to | |
|
643 | # replace, the rebase operation is not clearly defined. | |
|
644 | # | |
|
645 | # The table below sums up this behavior: | |
|
646 | # | |
|
647 | # +------------------+----------------------+-------------------------+ | 
|
648 | # | | one parent | merge | | 
|
649 | # +------------------+----------------------+-------------------------+ | 
|
650 | # | parent in | new parent is <dest> | parents in ::<dest> are | | 
|
651 | # | ::<dest> | | remapped to <dest> | | 
|
652 | # +------------------+----------------------+-------------------------+ | 
|
653 | # | unrelated source | new parent is <dest> | ambiguous, abort | | 
|
654 | # +------------------+----------------------+-------------------------+ | 
|
655 | # | |
|
656 | # The actual abort is handled by `defineparents` | |
|
657 | if len(root.parents()) <= 1: | |
|
658 | # ancestors of <root> not ancestors of <dest> | 
|
659 | detachset = repo.revs('::%d - ::%d - %d', root, commonbase, root) | |
|
660 | state.update(dict.fromkeys(detachset, nullmerge)) | |
|
634 | repo.ui.debug('rebase onto %d starting from %s\n' % (dest, roots)) | |
|
635 | state.update(dict.fromkeys(rebaseset, nullrev)) | |
|
636 | # Rebase tries to turn <dest> into a parent of <root> while | |
|
637 | # preserving the number of parents of rebased changesets: | |
|
638 | # | |
|
639 | # - A changeset with a single parent will always be rebased as a | |
|
640 | # changeset with a single parent. | |
|
641 | # | |
|
642 | # - A merge will be rebased as merge unless its parents are both | |
|
643 | # ancestors of <dest> or are themselves in the rebased set and | |
|
644 | # pruned while rebased. | |
|
645 | # | |
|
646 | # If one parent of <root> is an ancestor of <dest>, the rebased | |
|
647 | # version of this parent will be <dest>. This is always true with | |
|
648 | # --base option. | |
|
649 | # | |
|
650 | # Otherwise, we need to *replace* the original parents with | |
|
651 | # <dest>. This "detaches" the rebased set from its former location | |
|
652 | # and rebases it onto <dest>. Changes introduced by ancestors of | |
|
653 | # <root> not common with <dest> (the detachset, marked as | |
|
654 | # nullmerge) are "removed" from the rebased changesets. | |
|
655 | # | |
|
656 | # - If <root> has a single parent, set it to <dest>. | |
|
657 | # | |
|
658 | # - If <root> is a merge, we cannot decide which parent to | |
|
659 | # replace, the rebase operation is not clearly defined. | |
|
660 | # | |
|
661 | # The table below sums up this behavior: | |
|
662 | # | |
|
663 | # +------------------+----------------------+-------------------------+ | |
|
664 | # | | one parent | merge | | |
|
665 | # +------------------+----------------------+-------------------------+ | |
|
666 | # | parent in | new parent is <dest> | parents in ::<dest> are | | |
|
667 | # | ::<dest> | | remapped to <dest> | | |
|
668 | # +------------------+----------------------+-------------------------+ | |
|
669 | # | unrelated source | new parent is <dest> | ambiguous, abort | | |
|
670 | # +------------------+----------------------+-------------------------+ | |
|
671 | # | |
|
672 | # The actual abort is handled by `defineparents` | |
|
673 | if len(root.parents()) <= 1: | |
|
674 | # ancestors of <root> not ancestors of <dest> | |
|
675 | detachset.update(repo.changelog.findmissingrevs([commonbase.rev()], | |
|
676 | [root.rev()])) | |
|
677 | for r in detachset: | |
|
678 | if r not in state: | |
|
679 | state[r] = nullmerge | |
|
680 | if len(roots) > 1: | |
|
681 | # If we have multiple roots, we may have "holes" in the rebase set. | 
|
682 | # Rebase roots that descend from those "holes" should not be detached as | 
|
683 | # other roots are. We use the special `revignored` to inform rebase that | 
|
684 | # the revision should be ignored but that `defineparents` should search | 
|
685 | # for a rebase destination that makes sense regarding rebaseset topology. | 
|
686 | rebasedomain = set(repo.revs('%ld::%ld', rebaseset, rebaseset)) | |
|
687 | for ignored in set(rebasedomain) - set(rebaseset): | |
|
688 | state[ignored] = revignored | |
|
661 | 689 | return repo['.'].rev(), dest.rev(), state |
|
662 | 690 | |
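The state mapping built above relies on sentinels below any real revision number: nullrev (-1) for not-yet-rebased entries, nullmerge (-2) for detached ancestors, and the new revignored (-3) for holes between multiple roots. A toy illustration (the state dict is invented) of why comparisons in this series switch from '!= nullmerge' to '> nullmerge':

    nullrev = -1
    nullmerge = -2
    revignored = -3

    state = {10: nullrev, 11: nullrev, 8: nullmerge, 9: revignored}

    # '!= nullmerge' would wrongly keep the revignored entry;
    # '> nullmerge' keeps only real revisions and nullrev placeholders
    print sorted(r for r in state if state[r] != nullmerge)  # [9, 10, 11]
    print sorted(r for r in state if state[r] > nullmerge)   # [10, 11]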
|
663 | def clearrebased(ui, repo, state, collapsedas=None): | |
|
691 | def clearrebased(ui, repo, state, skipped, collapsedas=None): | |
|
664 | 692 | """dispose of rebased revision at the end of the rebase |
|
665 | 693 | |
|
666 | 694 | If `collapsedas` is not None, the rebase was a collapse whose result is the 
@@ -669,20 +697,28 b' def clearrebased(ui, repo, state, collap' | |||
|
669 | 697 | markers = [] |
|
670 | 698 | for rev, newrev in sorted(state.items()): |
|
671 | 699 | if newrev >= 0: |
|
672 | if collapsedas is not None: | |
|
673 | newrev = collapsedas | 
|
674 | markers.append((repo[rev], (repo[newrev],))) | |
|
700 | if rev in skipped: | |
|
701 | succs = () | |
|
702 | elif collapsedas is not None: | |
|
703 | succs = (repo[collapsedas],) | |
|
704 | else: | |
|
705 | succs = (repo[newrev],) | |
|
706 | markers.append((repo[rev], succs)) | |
|
675 | 707 | if markers: |
|
676 | 708 | obsolete.createmarkers(repo, markers) |
|
677 | 709 | else: |
|
678 | rebased = [rev for rev in state if state[rev] != nullmerge] | 
|
710 | rebased = [rev for rev in state if state[rev] > nullmerge] | |
|
679 | 711 | if rebased: |
|
680 | if set(repo.changelog.descendants([min(rebased)])) - set(state): | |
|
681 | ui.warn(_("warning: new changesets detected " | |
|
682 | "on source branch, not stripping\n")) | |
|
683 | else: | |
|
712 | stripped = [] | |
|
713 | for root in repo.set('roots(%ld)', rebased): | |
|
714 | if set(repo.changelog.descendants([root.rev()])) - set(state): | |
|
715 | ui.warn(_("warning: new changesets detected " | |
|
716 | "on source branch, not stripping\n")) | |
|
717 | else: | |
|
718 | stripped.append(root.node()) | |
|
719 | if stripped: | |
|
684 | 720 | # backup the old csets by default |
|
685 | repair.strip(ui, repo, repo[min(rebased)].node(), "all") | 
|
721 | repair.strip(ui, repo, stripped, "all") | |
|
686 | 722 | |
|
687 | 723 | |
|
688 | 724 | def pullrebase(orig, ui, repo, *args, **opts): |
@@ -8,7 +8,7 b'' | |||
|
8 | 8 | '''commands to interactively select changes for commit/qrefresh''' |
|
9 | 9 | |
|
10 | 10 | from mercurial.i18n import gettext, _ |
|
11 | from mercurial import cmdutil, commands, extensions, hg, mdiff, patch | 
|
11 | from mercurial import cmdutil, commands, extensions, hg, patch | |
|
12 | 12 | from mercurial import util |
|
13 | 13 | import copy, cStringIO, errno, os, re, shutil, tempfile |
|
14 | 14 | |
@@ -520,11 +520,11 b' def dorecord(ui, repo, commitfunc, cmdsu' | |||
|
520 | 520 | '(use "hg commit" instead)')) |
|
521 | 521 | |
|
522 | 522 | changes = repo.status(match=match)[:3] |
|
523 | diffopts = mdiff.diffopts( | 
|
523 | diffopts = patch.diffopts(ui, opts=dict( | |
|
524 | 524 | git=True, nodates=True, |
|
525 | 525 | ignorews=opts.get('ignore_all_space'), |
|
526 | 526 | ignorewsamount=opts.get('ignore_space_change'), |
|
527 | ignoreblanklines=opts.get('ignore_blank_lines')) | |
|
527 | ignoreblanklines=opts.get('ignore_blank_lines'))) | |
|
528 | 528 | chunks = patch.diff(repo, changes=changes, opts=diffopts) |
|
529 | 529 | fp = cStringIO.StringIO() |
|
530 | 530 | fp.write(''.join(chunks)) |
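A sketch of the new call shape, with patch.diffopts fed a plain dict; the whitespace flags are hardcoded here instead of coming from command-line opts, and a this-era Mercurial is assumed importable:

    from mercurial import ui as uimod, patch

    diffopts = patch.diffopts(uimod.ui(), opts=dict(
        git=True, nodates=True,
        ignorews=False,           # --ignore-all-space
        ignorewsamount=False,     # --ignore-space-change
        ignoreblanklines=False))  # --ignore-blank-lines
    print diffopts.git  # True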
@@ -94,7 +94,8 b' class transplanter(object):' | |||
|
94 | 94 | parentrev = repo.changelog.rev(parent) |
|
95 | 95 | if hasnode(repo, node): |
|
96 | 96 | rev = repo.changelog.rev(node) |
|
97 | reachable = repo.changelog.incancestors([parentrev], rev) | 
|
97 | reachable = repo.changelog.ancestors([parentrev], rev, | |
|
98 | inclusive=True) | |
|
98 | 99 | if rev in reachable: |
|
99 | 100 | return True |
|
100 | 101 | for t in self.transplants.get(node): |
@@ -103,7 +104,8 b' class transplanter(object):' | |||
|
103 | 104 | self.transplants.remove(t) |
|
104 | 105 | return False |
|
105 | 106 | lnoderev = repo.changelog.rev(t.lnode) |
|
106 | if lnoderev in repo.changelog.incancestors([parentrev], lnoderev): | 
|
107 | if lnoderev in repo.changelog.ancestors([parentrev], lnoderev, | |
|
108 | inclusive=True): | |
|
107 | 109 | return True |
|
108 | 110 | return False |
|
109 | 111 |
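Both call sites above move to changelog.ancestors(..., inclusive=True), which includes the starting revs themselves. A hedged usage sketch against a local repository (the path and revision numbers are placeholders):

    from mercurial import hg, ui as uimod

    repo = hg.repository(uimod.ui(), 'path/to/repo')  # placeholder
    parentrev, rev = 10, 8
    # revs reachable from parentrev (parentrev included), not going
    # below rev
    reachable = repo.changelog.ancestors([parentrev], rev, inclusive=True)
    print rev in reachable  # True when rev is an ancestor of parentrev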
@@ -121,7 +121,7 b' def forbidnewline(ui, repo, hooktype, no' | |||
|
121 | 121 | # changegroup that contains an unacceptable commit followed later |
|
122 | 122 | # by a commit that fixes the problem. |
|
123 | 123 | tip = repo['tip'] |
|
124 | for rev in xrange(len(repo)-1, repo[node].rev()-1, -1): | |
|
124 | for rev in xrange(len(repo) - 1, repo[node].rev() - 1, -1): | |
|
125 | 125 | c = repo[rev] |
|
126 | 126 | for f in c.files(): |
|
127 | 127 | if f in seen or f not in tip or f not in c: |
@@ -5,7 +5,8 b'' | |||
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | import heapq | |
|
8 | import heapq, util | |
|
9 | from node import nullrev | |
|
9 | 10 | |
|
10 | 11 | def ancestor(a, b, pfunc): |
|
11 | 12 | """ |
@@ -89,3 +90,175 b' def ancestor(a, b, pfunc):' | |||
|
89 | 90 | gx = x.next() |
|
90 | 91 | except StopIteration: |
|
91 | 92 | return None |
|
93 | ||
|
94 | def missingancestors(revs, bases, pfunc): | |
|
95 | """Return all the ancestors of revs that are not ancestors of bases. | |
|
96 | ||
|
97 | This may include elements from revs. | |
|
98 | ||
|
99 | Equivalent to the revset (::revs - ::bases). Revs are returned in | |
|
100 | revision number order, which is a topological order. | |
|
101 | ||
|
102 | revs and bases should both be iterables. pfunc must return a list of | |
|
103 | parent revs for a given rev. | 
|
104 | """ | |
|
105 | ||
|
106 | revsvisit = set(revs) | |
|
107 | basesvisit = set(bases) | |
|
108 | if not revsvisit: | |
|
109 | return [] | |
|
110 | if not basesvisit: | |
|
111 | basesvisit.add(nullrev) | |
|
112 | start = max(max(revsvisit), max(basesvisit)) | |
|
113 | bothvisit = revsvisit.intersection(basesvisit) | |
|
114 | revsvisit.difference_update(bothvisit) | |
|
115 | basesvisit.difference_update(bothvisit) | |
|
116 | # At this point, we hold the invariants that: | |
|
117 | # - revsvisit is the set of nodes we know are an ancestor of at least one | |
|
118 | # of the nodes in revs | |
|
119 | # - basesvisit is the same for bases | |
|
120 | # - bothvisit is the set of nodes we know are ancestors of at least one of | |
|
121 | # the nodes in revs and one of the nodes in bases | |
|
122 | # - a node may be in none or one, but not more, of revsvisit, basesvisit | |
|
123 | # and bothvisit at any given time | |
|
124 | # Now we walk down in reverse topo order, adding parents of nodes already | |
|
125 | # visited to the sets while maintaining the invariants. When a node is | |
|
126 | # found in both revsvisit and basesvisit, it is removed from them and | |
|
127 | # added to bothvisit instead. When revsvisit becomes empty, there are no | |
|
128 | # more ancestors of revs that aren't also ancestors of bases, so exit. | |
|
129 | ||
|
130 | missing = [] | |
|
131 | for curr in xrange(start, nullrev, -1): | |
|
132 | if not revsvisit: | |
|
133 | break | |
|
134 | ||
|
135 | if curr in bothvisit: | |
|
136 | bothvisit.remove(curr) | |
|
137 | # curr's parents might have made it into revsvisit or basesvisit | |
|
138 | # through another path | |
|
139 | for p in pfunc(curr): | |
|
140 | revsvisit.discard(p) | |
|
141 | basesvisit.discard(p) | |
|
142 | bothvisit.add(p) | |
|
143 | continue | |
|
144 | ||
|
145 | # curr will never be in both revsvisit and basesvisit, since if it | |
|
146 | # were it'd have been pushed to bothvisit | |
|
147 | if curr in revsvisit: | |
|
148 | missing.append(curr) | |
|
149 | thisvisit = revsvisit | |
|
150 | othervisit = basesvisit | |
|
151 | elif curr in basesvisit: | |
|
152 | thisvisit = basesvisit | |
|
153 | othervisit = revsvisit | |
|
154 | else: | |
|
155 | # not an ancestor of revs or bases: ignore | |
|
156 | continue | |
|
157 | ||
|
158 | thisvisit.remove(curr) | |
|
159 | for p in pfunc(curr): | |
|
160 | if p == nullrev: | |
|
161 | pass | |
|
162 | elif p in othervisit or p in bothvisit: | |
|
163 | # p is implicitly in thisvisit. This means p is or should be | |
|
164 | # in bothvisit | |
|
165 | revsvisit.discard(p) | |
|
166 | basesvisit.discard(p) | |
|
167 | bothvisit.add(p) | |
|
168 | else: | |
|
169 | # visit later | |
|
170 | thisvisit.add(p) | |
|
171 | ||
|
172 | missing.reverse() | |
|
173 | return missing | |
|
174 | ||
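Assuming the module above is importable as mercurial.ancestor, a tiny DAG exercises missingancestors; pfunc here is just a dict lookup:

    from mercurial.ancestor import missingancestors

    # 0 <- 1 <- 2 <- 4 and 1 <- 3 <- 4 (a small diamond); -1 is nullrev
    parents = {0: [-1], 1: [0], 2: [1], 3: [1], 4: [2, 3]}

    # ::[4] - ::[2] == {3, 4}, returned in revision (topological) order
    print missingancestors([4], [2], parents.__getitem__)  # [3, 4]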
|
175 | class lazyancestors(object): | |
|
176 | def __init__(self, cl, revs, stoprev=0, inclusive=False): | |
|
177 | """Create a new object generating ancestors for the given revs. Does | |
|
178 | not generate revs lower than stoprev. | |
|
179 | ||
|
180 | This is computed lazily starting from revs. The object supports | |
|
181 | iteration and membership. | |
|
182 | ||
|
183 | cl should be a changelog and revs should be an iterable. inclusive is | |
|
184 | a boolean that indicates whether revs should be included. Revs lower | |
|
185 | than stoprev will not be generated. | |
|
186 | ||
|
187 | Result does not include the null revision.""" | |
|
188 | self._parentrevs = cl.parentrevs | |
|
189 | self._initrevs = revs | |
|
190 | self._stoprev = stoprev | |
|
191 | self._inclusive = inclusive | |
|
192 | ||
|
193 | # Initialize data structures for __contains__. | |
|
194 | # For __contains__, we use a heap rather than a deque because | |
|
195 | # (a) it minimizes the number of parentrevs calls made | |
|
196 | # (b) it makes the loop termination condition obvious | |
|
197 | # Python's heap is a min-heap. Multiply all values by -1 to convert it | |
|
198 | # into a max-heap. | |
|
199 | self._containsvisit = [-rev for rev in revs] | |
|
200 | heapq.heapify(self._containsvisit) | |
|
201 | if inclusive: | |
|
202 | self._containsseen = set(revs) | |
|
203 | else: | |
|
204 | self._containsseen = set() | |
|
205 | ||
|
206 | def __iter__(self): | |
|
207 | """Generate the ancestors of _initrevs in reverse topological order. | |
|
208 | ||
|
209 | If inclusive is False, yield a sequence of revision numbers starting | |
|
210 | with the parents of each revision in revs, i.e., each revision is *not* | |
|
211 | considered an ancestor of itself. Results are in breadth-first order: | |
|
212 | parents of each rev in revs, then parents of those, etc. | |
|
213 | ||
|
214 | If inclusive is True, yield all the revs first (ignoring stoprev), | |
|
215 | then yield all the ancestors of revs as when inclusive is False. | |
|
216 | If an element in revs is an ancestor of a different rev it is not | |
|
217 | yielded again.""" | |
|
218 | seen = set() | |
|
219 | revs = self._initrevs | |
|
220 | if self._inclusive: | |
|
221 | for rev in revs: | |
|
222 | yield rev | |
|
223 | seen.update(revs) | |
|
224 | ||
|
225 | parentrevs = self._parentrevs | |
|
226 | stoprev = self._stoprev | |
|
227 | visit = util.deque(revs) | |
|
228 | ||
|
229 | while visit: | |
|
230 | for parent in parentrevs(visit.popleft()): | |
|
231 | if parent >= stoprev and parent not in seen: | |
|
232 | visit.append(parent) | |
|
233 | seen.add(parent) | |
|
234 | yield parent | |
|
235 | ||
|
236 | def __contains__(self, target): | |
|
237 | """Test whether target is an ancestor of self._initrevs.""" | |
|
238 | # Trying to do both __iter__ and __contains__ using the same visit | |
|
239 | # heap and seen set is complex enough that it slows down both. Keep | |
|
240 | # them separate. | |
|
241 | seen = self._containsseen | |
|
242 | if target in seen: | |
|
243 | return True | |
|
244 | ||
|
245 | parentrevs = self._parentrevs | |
|
246 | visit = self._containsvisit | |
|
247 | stoprev = self._stoprev | |
|
248 | heappop = heapq.heappop | |
|
249 | heappush = heapq.heappush | |
|
250 | ||
|
251 | targetseen = False | |
|
252 | ||
|
253 | while visit and -visit[0] > target and not targetseen: | |
|
254 | for parent in parentrevs(-heappop(visit)): | |
|
255 | if parent < stoprev or parent in seen: | |
|
256 | continue | |
|
257 | # We need to make sure we push all parents into the heap so | |
|
258 | # that we leave it in a consistent state for future calls. | |
|
259 | heappush(visit, -parent) | |
|
260 | seen.add(parent) | |
|
261 | if parent == target: | |
|
262 | targetseen = True | |
|
263 | ||
|
264 | return targetseen |
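A sketch of driving lazyancestors with a minimal changelog stand-in; per the constructor above, only parentrevs() is needed:

    from mercurial.ancestor import lazyancestors

    class fakechangelog(object):
        # bare-bones stand-in: lazyancestors only calls cl.parentrevs(rev)
        def __init__(self, parents):
            self._parents = parents
        def parentrevs(self, rev):
            return self._parents[rev]

    # 0 <- 1 <- 2, and 3 is a merge of (1, 2); (-1, -1) means no parents
    cl = fakechangelog({0: (-1, -1), 1: (0, -1), 2: (1, -1), 3: (1, 2)})
    anc = lazyancestors(cl, [3], inclusive=False)
    print list(anc)  # breadth-first parents: [1, 2, 0]
    print 0 in anc   # membership via the separate heap-based walk: True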
@@ -74,8 +74,11 b' class tarit(object):' | |||
|
74 | 74 | def _write_gzip_header(self): |
|
75 | 75 | self.fileobj.write('\037\213') # magic header |
|
76 | 76 | self.fileobj.write('\010') # compression method |
|
77 | # Python 2.6 deprecates self.filename | 
|
78 | fname = getattr(self, 'name', None) or self.filename | |
|
77 | # Python 2.6 introduced self.name and deprecated self.filename | |
|
78 | try: | |
|
79 | fname = self.name | |
|
80 | except AttributeError: | |
|
81 | fname = self.filename | |
|
79 | 82 | if fname and fname.endswith('.gz'): |
|
80 | 83 | fname = fname[:-3] |
|
81 | 84 | flags = 0 |
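The replacement prefers EAFP over the getattr fallback. The same probe in isolation, on Python 2 where GzipFile grew .name in 2.6:

    import gzip, StringIO

    gz = gzip.GzipFile('archive.tar.gz', 'w', 9, StringIO.StringIO())
    try:
        fname = gz.name        # Python >= 2.6
    except AttributeError:
        fname = gz.filename    # older Pythons
    print fname                # archive.tar.gz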
@@ -103,7 +106,6 b' class tarit(object):' | |||
|
103 | 106 | self.fileobj = gzfileobj |
|
104 | 107 | return tarfile.TarFile.taropen(name, mode, gzfileobj) |
|
105 | 108 | else: |
|
106 | self.fileobj = fileobj | |
|
107 | 109 | return tarfile.open(name, mode + kind, fileobj) |
|
108 | 110 | |
|
109 | 111 | if isinstance(dest, str): |
@@ -191,7 +193,7 b' class zipit(object):' | |||
|
191 | 193 | 0x5455, # block type: "extended-timestamp" |
|
192 | 194 | 1 + 4, # size of this block |
|
193 | 195 | 1, # "modification time is present" |
|
194 | self.mtime) # last modification (UTC) | 
|
196 | int(self.mtime)) # last modification (UTC) | |
|
195 | 197 | self.z.writestr(i, data) |
|
196 | 198 | |
|
197 | 199 | def done(self): |
@@ -297,7 +299,7 b' def archive(repo, dest, node, kind, deco' | |||
|
297 | 299 | repo.ui.progress(_('archiving'), None) |
|
298 | 300 | |
|
299 | 301 | if subrepos: |
|
300 | for subpath in ctx.substate: | |
|
302 | for subpath in sorted(ctx.substate): | |
|
301 | 303 | sub = ctx.sub(subpath) |
|
302 | 304 | submatch = matchmod.narrowmatcher(subpath, matchfn) |
|
303 | 305 | sub.archive(repo.ui, archiver, prefix, submatch) |
@@ -7,40 +7,80 b'' | |||
|
7 | 7 | |
|
8 | 8 | from mercurial.i18n import _ |
|
9 | 9 | from mercurial.node import hex |
|
10 | from mercurial import encoding, error, util, obsolete | 
|
10 | from mercurial import encoding, error, util, obsolete | |
|
11 | 11 | import errno, os |
|
12 | 12 | |
|
13 | def read(repo): | |
|
14 | '''Parse .hg/bookmarks file and return a dictionary | |
|
13 | class bmstore(dict): | |
|
14 | """Storage for bookmarks. | |
|
15 | ||
|
16 | This object should do all bookmark reads and writes, so that it's | |
|
17 | fairly simple to replace the storage underlying bookmarks without | |
|
18 | having to clone the logic surrounding bookmarks. | |
|
19 | ||
|
20 | This particular bmstore implementation stores bookmarks as | |
|
21 | {hash}\s{name}\n (the same format as localtags) in | |
|
22 | .hg/bookmarks. The mapping is stored as {name: nodeid}. | |
|
23 | ||
|
24 | This class does NOT handle the "current" bookmark state at this | |
|
25 | time. | |
|
26 | """ | |
|
15 | 27 | |
|
16 | Bookmarks are stored as {HASH}\\s{NAME}\\n (localtags format) values | |
|
17 | in the .hg/bookmarks file. | |
|
18 | Read the file and return a (name=>nodeid) dictionary | |
|
19 | ''' | |
|
20 | bookmarks = {} | |
|
21 | try: | |
|
22 | for line in repo.opener('bookmarks'): | |
|
23 | line = line.strip() | |
|
24 | if not line: | |
|
25 | continue | |
|
26 | if ' ' not in line: | |
|
27 | repo.ui.warn(_('malformed line in .hg/bookmarks: %r\n') % line) | |
|
28 | continue | |
|
29 | sha, refspec = line.split(' ', 1) | |
|
30 | refspec = encoding.tolocal(refspec) | |
|
28 | def __init__(self, repo): | |
|
29 | dict.__init__(self) | |
|
30 | self._repo = repo | |
|
31 | try: | |
|
32 | for line in repo.vfs('bookmarks'): | |
|
33 | line = line.strip() | |
|
34 | if not line: | |
|
35 | continue | |
|
36 | if ' ' not in line: | |
|
37 | repo.ui.warn(_('malformed line in .hg/bookmarks: %r\n') | |
|
38 | % line) | |
|
39 | continue | |
|
40 | sha, refspec = line.split(' ', 1) | |
|
41 | refspec = encoding.tolocal(refspec) | |
|
42 | try: | |
|
43 | self[refspec] = repo.changelog.lookup(sha) | |
|
44 | except LookupError: | |
|
45 | pass | |
|
46 | except IOError, inst: | |
|
47 | if inst.errno != errno.ENOENT: | |
|
48 | raise | |
|
49 | ||
|
50 | def write(self): | |
|
51 | '''Write bookmarks | |
|
52 | ||
|
53 | Write the given bookmark => hash dictionary to the .hg/bookmarks file | |
|
54 | in a format equal to those of localtags. | |
|
55 | ||
|
56 | We also store a backup of the previous state in undo.bookmarks that | |
|
57 | can be copied back on rollback. | |
|
58 | ''' | |
|
59 | repo = self._repo | |
|
60 | if repo._bookmarkcurrent not in self: | |
|
61 | setcurrent(repo, None) | |
|
62 | ||
|
63 | wlock = repo.wlock() | |
|
64 | try: | |
|
65 | ||
|
66 | file = repo.vfs('bookmarks', 'w', atomictemp=True) | |
|
67 | for name, node in self.iteritems(): | |
|
68 | file.write("%s %s\n" % (hex(node), encoding.fromlocal(name))) | |
|
69 | file.close() | |
|
70 | ||
|
71 | # touch 00changelog.i so hgweb reloads bookmarks (no lock needed) | |
|
31 | 72 | try: |
|
32 | bookmarks[refspec] = repo.changelog.lookup(sha) | |
|
33 | except LookupError: | |
|
73 | os.utime(repo.sjoin('00changelog.i'), None) | |
|
74 | except OSError: | |
|
34 | 75 | pass |
|
35 | except IOError, inst: | |
|
36 | if inst.errno != errno.ENOENT: | |
|
37 | raise | |
|
38 | return bookmarks | |
|
76 | ||
|
77 | finally: | |
|
78 | wlock.release() | |
|
39 | 79 | |
|
40 | 80 | def readcurrent(repo): |
|
41 | 81 | '''Get the current bookmark |
|
42 | 82 | |
|
43 | If we use gittishsh branches we have a current bookmark that | |
|
83 | If we use gittish branches we have a current bookmark that | |
|
44 | 84 | we are on. This function returns the name of the bookmark. It |
|
45 | 85 | is stored in .hg/bookmarks.current |
|
46 | 86 | ''' |
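
The bmstore docstring above pins down the on-disk format: one "<40-hex-hash> <name>\n" entry per line, split on the first space only so names may themselves contain spaces. A minimal reader for that format, as a sketch (parsebookmarks is an illustrative name, not part of the module):

    from binascii import unhexlify

    def parsebookmarks(data):
        marks = {}
        for line in data.splitlines():
            line = line.strip()
            if not line or ' ' not in line:
                continue                      # skip blank and malformed lines
            sha, name = line.split(' ', 1)    # split once: names may hold spaces
            marks[name] = unhexlify(sha)      # {name: nodeid}, as the class stores
        return marks

    sample = '0123456789abcdef0123456789abcdef01234567 my bookmark\n'
    assert 'my bookmark' in parsebookmarks(sample)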
@@ -60,37 +100,6 b' def readcurrent(repo):' | |||
|
60 | 100 | file.close() |
|
61 | 101 | return mark |
|
62 | 102 | |
|
63 | def write(repo): | |
|
64 | '''Write bookmarks | |
|
65 | ||
|
66 | Write the given bookmark => hash dictionary to the .hg/bookmarks file | |
|
67 | in a format equal to those of localtags. | |
|
68 | ||
|
69 | We also store a backup of the previous state in undo.bookmarks that | |
|
70 | can be copied back on rollback. | |
|
71 | ''' | |
|
72 | refs = repo._bookmarks | |
|
73 | ||
|
74 | if repo._bookmarkcurrent not in refs: | |
|
75 | setcurrent(repo, None) | |
|
76 | ||
|
77 | wlock = repo.wlock() | |
|
78 | try: | |
|
79 | ||
|
80 | file = repo.opener('bookmarks', 'w', atomictemp=True) | |
|
81 | for refspec, node in refs.iteritems(): | |
|
82 | file.write("%s %s\n" % (hex(node), encoding.fromlocal(refspec))) | |
|
83 | file.close() | |
|
84 | ||
|
85 | # touch 00changelog.i so hgweb reloads bookmarks (no lock needed) | |
|
86 | try: | |
|
87 | os.utime(repo.sjoin('00changelog.i'), None) | |
|
88 | except OSError: | |
|
89 | pass | |
|
90 | ||
|
91 | finally: | |
|
92 | wlock.release() | |
|
93 | ||
|
94 | 103 | def setcurrent(repo, mark): |
|
95 | 104 | '''Set the name of the bookmark that we are currently on |
|
96 | 105 | |
@@ -152,7 +161,7 b' def update(repo, parents, node):' | |||
|
152 | 161 | if mark != cur: |
|
153 | 162 | del marks[mark] |
|
154 | 163 | if update: |
|
155 | write(repo) | |
|
164 | marks.write() | |
|
156 | 165 | return update |
|
157 | 166 | |
|
158 | 167 | def listbookmarks(repo): |
@@ -179,7 +188,7 b' def pushbookmark(repo, key, old, new):' | |||
|
179 | 188 | if new not in repo: |
|
180 | 189 | return False |
|
181 | 190 | marks[key] = repo[new].node() |
|
182 | write(repo) | |
|
191 | marks.write() | |
|
183 | 192 | return True |
|
184 | 193 | finally: |
|
185 | 194 | w.release() |
@@ -188,16 +197,17 b' def updatefromremote(ui, repo, remote, p' | |||
|
188 | 197 | ui.debug("checking for updated bookmarks\n") |
|
189 | 198 | rb = remote.listkeys('bookmarks') |
|
190 | 199 | changed = False |
|
191 | for k in rb.keys(): | |
|
192 | if k in repo._bookmarks: | |
|
193 | nr, nl = rb[k], repo._bookmarks[k] | |
|
200 | localmarks = repo._bookmarks | |
|
201 | for k in sorted(rb): | |
|
202 | if k in localmarks: | |
|
203 | nr, nl = rb[k], localmarks[k] | |
|
194 | 204 | if nr in repo: |
|
195 | 205 | cr = repo[nr] |
|
196 | 206 | cl = repo[nl] |
|
197 | 207 | if cl.rev() >= cr.rev(): |
|
198 | 208 | continue |
|
199 | 209 | if validdest(repo, cl, cr): |
|
200 | repo._bookmarks[k] = cr.node() | |
|
210 | localmarks[k] = cr.node() | |
|
201 | 211 | changed = True |
|
202 | 212 | ui.status(_("updating bookmark %s\n") % k) |
|
203 | 213 | else: |
@@ -208,7 +218,7 b' def updatefromremote(ui, repo, remote, p' | |||
|
208 | 218 | # find a unique @ suffix |
|
209 | 219 | for x in range(1, 100): |
|
210 | 220 | n = '%s@%d' % (kd, x) |
|
211 | if n not in repo._bookmarks: | |
|
221 | if n not in localmarks: | |
|
212 | 222 | break |
|
213 | 223 | # try to use an @pathalias suffix |
|
214 | 224 | # if an @pathalias already exists, we overwrite (update) it |
@@ -216,17 +226,17 b' def updatefromremote(ui, repo, remote, p' | |||
|
216 | 226 | if path == u: |
|
217 | 227 | n = '%s@%s' % (kd, p) |
|
218 | 228 | |
|
219 | repo._bookmarks[n] = cr.node() | |
|
229 | localmarks[n] = cr.node() | |
|
220 | 230 | changed = True |
|
221 | 231 | ui.warn(_("divergent bookmark %s stored as %s\n") % (k, n)) |
|
222 | 232 | elif rb[k] in repo: |
|
223 | 233 | # add remote bookmarks for changes we already have |
|
224 | repo._bookmarks[k] = repo[rb[k]].node() | |
|
234 | localmarks[k] = repo[rb[k]].node() | |
|
225 | 235 | changed = True |
|
226 | 236 | ui.status(_("adding remote bookmark %s\n") % k) |
|
227 | 237 | |
|
228 | 238 | if changed: |
|
229 | write(repo) | |
|
239 | localmarks.write() | |
|
230 | 240 | |
|
231 | 241 | def diff(ui, dst, src): |
|
232 | 242 | ui.status(_("searching for changed bookmarks\n")) |
@@ -246,6 +256,7 b' def diff(ui, dst, src):' | |||
|
246 | 256 | |
|
247 | 257 | def validdest(repo, old, new): |
|
248 | 258 | """Is the new bookmark destination a valid update from the old one""" |
|
259 | repo = repo.unfiltered() | |
|
249 | 260 | if old == new: |
|
250 | 261 | # Old == new -> nothing to update. |
|
251 | 262 | return False |
@@ -263,14 +274,10 b' def validdest(repo, old, new):' | |||
|
263 | 274 | while len(validdests) != plen: |
|
264 | 275 | plen = len(validdests) |
|
265 | 276 | succs = set(c.node() for c in validdests) |
|
266 | for c in validdests: | |
|
267 | if c.phase() > phases.public: | |
|
268 | # obsolescence marker does not apply to public changeset | |
|
269 | succs.update(obsolete.allsuccessors(repo.obsstore, | |
|
270 | [c.node()])) | |
|
277 | mutable = [c.node() for c in validdests if c.mutable()] | |
|
278 | succs.update(obsolete.allsuccessors(repo.obsstore, mutable)) | |
|
271 | 279 | known = (n for n in succs if n in nm) |
|
272 | 280 | validdests = set(repo.set('%ln::', known)) |
|
273 | validdests.remove(old) | |
|
274 | 281 | return new in validdests |
|
275 | 282 | else: |
|
276 | 283 | return old.descendant(new) |
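
The rewritten loop above is a fixed-point computation: keep folding in successors of mutable members until the set stops growing. The bare pattern, sketched with a generic successors(node) accessor (names are illustrative):

    def successorclosure(start, successors):
        valid = set(start)
        size = -1
        while len(valid) != size:        # iterate until no new node appears
            size = len(valid)
            for n in list(valid):
                valid.update(successors(n))
        return valid

    succs = {'a': ['b'], 'b': ['c'], 'c': []}
    assert successorclosure(['a'], succs.__getitem__) == set(['a', 'b', 'c'])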
@@ -14,25 +14,28 b' were part of the actual repository.' | |||
|
14 | 14 | from node import nullid |
|
15 | 15 | from i18n import _ |
|
16 | 16 | import os, tempfile, shutil |
|
17 | import changegroup, util, mdiff, discovery, cmdutil | |
|
17 | import changegroup, util, mdiff, discovery, cmdutil, scmutil | |
|
18 | 18 | import localrepo, changelog, manifest, filelog, revlog, error |
|
19 | 19 | |
|
20 | 20 | class bundlerevlog(revlog.revlog): |
|
21 | 21 | def __init__(self, opener, indexfile, bundle, linkmapper): |
|
22 | 22 | # How it works: |
|
23 | # to retrieve a revision, we need to know the offset of | |
|
24 | # the revision in the bundle (an unbundle object). | |
|
23 | # To retrieve a revision, we need to know the offset of the revision in | |
|
24 | # the bundle (an unbundle object). We store this offset in the index | |
|
25 | # (start). | |
|
25 | 26 | # |
|
26 | # We store this offset in the index (start), to differentiate a | |
|
27 | # rev in the bundle and from a rev in the revlog, we check | |
|
28 | # len(index[r]). If the tuple is bigger than 7, it is a bundle | |
|
29 | # (it is bigger since we store the node to which the delta is) | |
|
27 | # basemap is indexed with revisions coming from the bundle, and it | |
|
28 | # maps to the revision that is the base of the corresponding delta. | |
|
30 | 29 | # |
|
30 | # To differentiate a rev in the bundle from a rev in the revlog, we | |
|
31 | # check revision against basemap. | |
|
32 | opener = scmutil.readonlyvfs(opener) | |
|
31 | 33 | revlog.revlog.__init__(self, opener, indexfile) |
|
32 | 34 | self.bundle = bundle |
|
33 | self.basemap = {} | |
|
35 | self.basemap = {} # mapping rev to delta base rev | |
|
34 | 36 | n = len(self) |
|
35 | 37 | chain = None |
|
38 | self.bundlerevs = set() # used by 'bundle()' revset expression | |
|
36 | 39 | while True: |
|
37 | 40 | chunkdata = bundle.deltachunk(chain) |
|
38 | 41 | if not chunkdata: |
@@ -51,49 +54,50 b' class bundlerevlog(revlog.revlog):' | |||
|
51 | 54 | if node in self.nodemap: |
|
52 | 55 | # this can happen if two branches make the same change |
|
53 | 56 | chain = node |
|
57 | self.bundlerevs.add(self.nodemap[node]) | |
|
54 | 58 | continue |
|
55 | 59 | |
|
56 | 60 | for p in (p1, p2): |
|
57 | 61 | if p not in self.nodemap: |
|
58 | 62 | raise error.LookupError(p, self.indexfile, |
|
59 | 63 | _("unknown parent")) |
|
64 | ||
|
65 | if deltabase not in self.nodemap: | |
|
66 | raise LookupError(deltabase, self.indexfile, | |
|
67 | _('unknown delta base')) | |
|
68 | ||
|
69 | baserev = self.rev(deltabase) | |
|
60 | 70 | # start, size, full unc. size, base (unused), link, p1, p2, node |
|
61 | 71 | e = (revlog.offset_type(start, 0), size, -1, -1, link, |
|
62 | 72 | self.rev(p1), self.rev(p2), node) |
|
63 | self.basemap[n] = deltabase | |
|
73 | self.basemap[n] = baserev | |
|
64 | 74 | self.index.insert(-1, e) |
|
65 | 75 | self.nodemap[node] = n |
|
76 | self.bundlerevs.add(n) | |
|
66 | 77 | chain = node |
|
67 | 78 | n += 1 |
|
68 | 79 | |
|
69 | def inbundle(self, rev): | |
|
70 | """is rev from the bundle""" | |
|
71 | if rev < 0: | |
|
72 | return False | |
|
73 | return rev in self.basemap | |
|
74 | def bundlebase(self, rev): | |
|
75 | return self.basemap[rev] | |
|
76 | 80 | def _chunk(self, rev): |
|
77 | # Warning: in case of bundle, the diff is against bundlebase, | |
|
81 | # Warning: in case of bundle, the diff is against self.basemap, | |
|
78 | 82 | # not against rev - 1 |
|
79 | 83 | # XXX: could use some caching |
|
80 | if not self.inbundle(rev): | |
|
84 | if rev not in self.basemap: | |
|
81 | 85 | return revlog.revlog._chunk(self, rev) |
|
82 | 86 | self.bundle.seek(self.start(rev)) |
|
83 | 87 | return self.bundle.read(self.length(rev)) |
|
84 | 88 | |
|
85 | 89 | def revdiff(self, rev1, rev2): |
|
86 | 90 | """return or calculate a delta between two revisions""" |
|
87 | if self.inbundle(rev1) and self.inbundle(rev2): | |
|
91 | if rev1 in self.basemap and rev2 in self.basemap: | |
|
88 | 92 | # hot path for bundle |
|
89 | revb = self.rev(self.bundlebase(rev2)) | |
|
93 | revb = self.basemap[rev2] | |
|
90 | 94 | if revb == rev1: |
|
91 | 95 | return self._chunk(rev2) |
|
92 | elif not self.inbundle(rev1) and not self.inbundle(rev2): | |
|
96 | elif rev1 not in self.basemap and rev2 not in self.basemap: | |
|
93 | 97 | return revlog.revlog.revdiff(self, rev1, rev2) |
|
94 | 98 | |
|
95 | 99 | return mdiff.textdiff(self.revision(self.node(rev1)), |
|
96 | self.revision(self.node(rev2))) | |
|
100 | self.revision(self.node(rev2))) | |
|
97 | 101 | |
|
98 | 102 | def revision(self, nodeorrev): |
|
99 | 103 | """return an uncompressed revision of a given node or revision |
@@ -111,28 +115,23 b' class bundlerevlog(revlog.revlog):' | |||
|
111 | 115 | |
|
112 | 116 | text = None |
|
113 | 117 | chain = [] |
|
114 | iter_node = node | |
|
118 | iterrev = rev | |
|
115 | 119 | # reconstruct the revision if it is from a changegroup |
|
116 | while self.inbundle(rev): | |
|
117 | if self._cache and self._cache[0] == iter_node: | |
|
120 | while iterrev in self.basemap: | |
|
121 | if self._cache and self._cache[1] == iterrev: | |
|
118 | 122 | text = self._cache[2] |
|
119 | 123 | break |
|
120 | chain.append(rev) | |
|
121 | iter_node = self.bundlebase(rev) | |
|
122 | rev = self.rev(iter_node) | |
|
124 | chain.append(iterrev) | |
|
125 | iterrev = self.basemap[iterrev] | |
|
123 | 126 | if text is None: |
|
124 | text = revlog.revlog.revision(self, iter_node) | |
|
127 | text = revlog.revlog.revision(self, iterrev) | |
|
125 | 128 | |
|
126 | 129 | while chain: |
|
127 | 130 | delta = self._chunk(chain.pop()) |
|
128 | 131 | text = mdiff.patches(text, [delta]) |
|
129 | 132 | |
|
130 | p1, p2 = self.parents(node) | |
|
131 | if node != revlog.hash(text, p1, p2): | |
|
132 | raise error.RevlogError(_("integrity check failed on %s:%d") | |
|
133 | % (self.datafile, self.rev(node))) | |
|
134 | ||
|
135 | self._cache = (node, self.rev(node), text) | |
|
133 | self._checkhash(text, node, rev) | |
|
134 | self._cache = (node, rev, text) | |
|
136 | 135 | return text |
|
137 | 136 | |
|
138 | 137 | def addrevision(self, text, transaction, link, p1=None, p2=None, d=None): |
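
revision() above rebuilds a bundle revision by walking basemap down to a rev the local revlog can serve, then replaying the collected deltas in reverse. A compact sketch of that reconstruction, with fulltext/getdelta/applydelta standing in for revlog.revision, _chunk and mdiff.patches:

    def reconstruct(rev, basemap, fulltext, getdelta, applydelta):
        chain = []
        iterrev = rev
        while iterrev in basemap:          # rev still lives in the bundle
            chain.append(iterrev)
            iterrev = basemap[iterrev]     # hop to the delta base
        text = fulltext(iterrev)           # first rev stored outside the bundle
        while chain:
            text = applydelta(text, getdelta(chain.pop()))
        return text

    # toy: rev 2 is "base" plus delta "+b" plus delta "+c"
    deltas = {1: '+b', 2: '+c'}
    got = reconstruct(2, {1: 0, 2: 1}, lambda r: 'base',
                      deltas.__getitem__, lambda t, d: t + d)
    assert got == 'base+b+c'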
@@ -212,7 +211,7 b' class bundlerepository(localrepo.localre' | |||
|
212 | 211 | # dict with the mapping 'filename' -> position in the bundle |
|
213 | 212 | self.bundlefilespos = {} |
|
214 | 213 | |
|
215 | @util.propertycache | |
|
214 | @localrepo.unfilteredpropertycache | |
|
216 | 215 | def changelog(self): |
|
217 | 216 | # consume the header if it exists |
|
218 | 217 | self.bundle.changelogheader() |
@@ -220,7 +219,7 b' class bundlerepository(localrepo.localre' | |||
|
220 | 219 | self.manstart = self.bundle.tell() |
|
221 | 220 | return c |
|
222 | 221 | |
|
223 | @util.propertycache | |
|
222 | @localrepo.unfilteredpropertycache | |
|
224 | 223 | def manifest(self): |
|
225 | 224 | self.bundle.seek(self.manstart) |
|
226 | 225 | # consume the header if it exists |
@@ -229,12 +228,12 b' class bundlerepository(localrepo.localre' | |||
|
229 | 228 | self.filestart = self.bundle.tell() |
|
230 | 229 | return m |
|
231 | 230 | |
|
232 | @util.propertycache | |
|
231 | @localrepo.unfilteredpropertycache | |
|
233 | 232 | def manstart(self): |
|
234 | 233 | self.changelog |
|
235 | 234 | return self.manstart |
|
236 | 235 | |
|
237 | @util.propertycache | |
|
236 | @localrepo.unfilteredpropertycache | |
|
238 | 237 | def filestart(self): |
|
239 | 238 | self.manifest |
|
240 | 239 | return self.filestart |
@@ -256,8 +255,6 b' class bundlerepository(localrepo.localre' | |||
|
256 | 255 | if not c: |
|
257 | 256 | break |
|
258 | 257 | |
|
259 | if f[0] == '/': | |
|
260 | f = f[1:] | |
|
261 | 258 | if f in self.bundlefilespos: |
|
262 | 259 | self.bundle.seek(self.bundlefilespos[f]) |
|
263 | 260 | return bundlefilelog(self.sopener, f, self.bundle, |
@@ -282,9 +279,6 b' class bundlerepository(localrepo.localre' | |||
|
282 | 279 | def getcwd(self): |
|
283 | 280 | return os.getcwd() # always outside the repo |
|
284 | 281 | |
|
285 | def _writebranchcache(self, branches, tip, tiprev): | |
|
286 | # don't overwrite the disk cache with bundle-augmented data | |
|
287 | pass | |
|
288 | 282 | |
|
289 | 283 | def instance(ui, path, create): |
|
290 | 284 | if create: |
@@ -384,4 +378,3 b' def getremotechanges(ui, repo, other, on' | |||
|
384 | 378 | other.close() |
|
385 | 379 | |
|
386 | 380 | return (localrepo, csets, cleanup) |
|
387 |
@@ -27,10 +27,13 b' def _string_escape(text):' | |||
|
27 | 27 | |
|
28 | 28 | def decodeextra(text): |
|
29 | 29 | """ |
|
30 | >>> decodeextra(encodeextra({'foo': 'bar', 'baz': chr(0) + '2'})) | |
|
31 | {'foo': 'bar', 'baz': '\\x002', 'branch': 'default'} | |
|
32 | >>> decodeextra(encodeextra({'foo': 'bar', 'baz': chr(92) + chr(0) + '2'})) | |
|
33 | {'foo': 'bar', 'baz': '\\\\\\x002', 'branch': 'default'} | |
|
30 | >>> sorted(decodeextra(encodeextra({'foo': 'bar', 'baz': chr(0) + '2'}) | |
|
31 | ... ).iteritems()) | |
|
32 | [('baz', '\\x002'), ('branch', 'default'), ('foo', 'bar')] | |
|
33 | >>> sorted(decodeextra(encodeextra({'foo': 'bar', | |
|
34 | ... 'baz': chr(92) + chr(0) + '2'}) | |
|
35 | ... ).iteritems()) | |
|
36 | [('baz', '\\\\\\x002'), ('branch', 'default'), ('foo', 'bar')] | |
|
34 | 37 | """ |
|
35 | 38 | extra = _defaultextra.copy() |
|
36 | 39 | for l in text.split('\0'): |
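
The doctest rewrite above exists because a dict's repr depends on iteration order, so comparing it verbatim in a doctest is flaky, while a sorted item list is stable. A one-liner illustrating the point:

    d = {'foo': 'bar', 'branch': 'default', 'baz': '\x002'}
    # repr(d) can legally vary; the sorted item list cannot
    assert sorted(d.items()) == [('baz', '\x002'), ('branch', 'default'),
                                 ('foo', 'bar')]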
@@ -124,7 +127,7 b' class changelog(revlog.revlog):' | |||
|
124 | 127 | self._realopener = opener |
|
125 | 128 | self._delayed = False |
|
126 | 129 | self._divert = False |
|
127 | self.filteredrevs = () | |
|
130 | self.filteredrevs = frozenset() | |
|
128 | 131 | |
|
129 | 132 | def tip(self): |
|
130 | 133 | """filtered version of revlog.tip""" |
@@ -337,3 +340,10 b' class changelog(revlog.revlog):' | |||
|
337 | 340 | l = [hex(manifest), user, parseddate] + sorted(files) + ["", desc] |
|
338 | 341 | text = "\n".join(l) |
|
339 | 342 | return self.addrevision(text, transaction, len(self), p1, p2) |
|
343 | ||
|
344 | def branch(self, rev): | |
|
345 | """return the branch of a revision | |
|
346 | ||
|
347 | This function exists because creating a changectx object | |
|
348 | just to access this is costly.""" | |
|
349 | return encoding.tolocal(self.read(rev)[5].get("branch")) |
@@ -10,7 +10,7 b' from i18n import _' | |||
|
10 | 10 | import os, sys, errno, re, tempfile |
|
11 | 11 | import util, scmutil, templater, patch, error, templatekw, revlog, copies |
|
12 | 12 | import match as matchmod |
|
13 | import subrepo, context, repair, bookmarks, graphmod, revset, phases, obsolete | |
|
13 | import subrepo, context, repair, graphmod, revset, phases, obsolete | |
|
14 | 14 | import changelog |
|
15 | 15 | import lock as lockmod |
|
16 | 16 | |
@@ -85,7 +85,7 b' def bailifchanged(repo):' | |||
|
85 | 85 | if modified or added or removed or deleted: |
|
86 | 86 | raise util.Abort(_("outstanding uncommitted changes")) |
|
87 | 87 | ctx = repo[None] |
|
88 | for s in ctx.substate: | |
|
88 | for s in sorted(ctx.substate): | |
|
89 | 89 | if ctx.sub(s).dirty(): |
|
90 | 90 | raise util.Abort(_("uncommitted changes in subrepo %s") % s) |
|
91 | 91 | |
@@ -1137,8 +1137,8 b' def walkchangerevs(repo, match, opts, pr' | |||
|
1137 | 1137 | for path in match.files(): |
|
1138 | 1138 | if path == '.' or path in repo.store: |
|
1139 | 1139 | break |
|
1140 | else: | |
|
1141 | return [] | |
|
1140 | else: | |
|
1141 | return [] | |
|
1142 | 1142 | |
|
1143 | 1143 | if slowpath: |
|
1144 | 1144 | # We have to read the changelog to match filenames against |
@@ -1399,39 +1399,18 b' def getgraphlogrevs(repo, pats, opts):' | |||
|
1399 | 1399 | callable taking a revision number and returning a match objects |
|
1400 | 1400 | filtering the files to be detailed when displaying the revision. |
|
1401 | 1401 | """ |
|
1402 | def increasingrevs(repo, revs, matcher): | |
|
1403 | # The sorted input rev sequence is chopped in sub-sequences | |
|
1404 | # which are sorted in ascending order and passed to the | |
|
1405 | # matcher. The filtered revs are sorted again as they were in | |
|
1406 | # the original sub-sequence. This achieve several things: | |
|
1407 | # | |
|
1408 | # - getlogrevs() now returns a generator which behaviour is | |
|
1409 | # adapted to log need. First results come fast, last ones | |
|
1410 | # are batched for performances. | |
|
1411 | # | |
|
1412 | # - revset matchers often operate faster on revision in | |
|
1413 | # changelog order, because most filters deal with the | |
|
1414 | # changelog. | |
|
1415 | # | |
|
1416 | # - revset matchers can reorder revisions. "A or B" typically | |
|
1417 | # returns returns the revision matching A then the revision | |
|
1418 | # matching B. We want to hide this internal implementation | |
|
1419 | # detail from the caller, and sorting the filtered revision | |
|
1420 | # again achieves this. | |
|
1421 | for i, window in increasingwindows(0, len(revs), windowsize=1): | |
|
1422 | orevs = revs[i:i + window] | |
|
1423 | nrevs = set(matcher(repo, sorted(orevs))) | |
|
1424 | for rev in orevs: | |
|
1425 | if rev in nrevs: | |
|
1426 | yield rev | |
|
1427 | ||
|
1428 | 1402 | if not len(repo): |
|
1429 | return iter([]), None, None | |
|
1403 | return [], None, None | |
|
1404 | limit = loglimit(opts) | |
|
1430 | 1405 | # Default --rev value depends on --follow but --follow behaviour |
|
1431 | 1406 | # depends on revisions resolved from --rev... |
|
1432 | 1407 | follow = opts.get('follow') or opts.get('follow_first') |
|
1408 | possiblyunsorted = False # whether revs might need sorting | |
|
1433 | 1409 | if opts.get('rev'): |
|
1434 | 1410 | revs = scmutil.revrange(repo, opts['rev']) |
|
1411 | # Don't sort here because _makegraphlogrevset might depend on the | |
|
1412 | # order of revs | |
|
1413 | possiblyunsorted = True | |
|
1435 | 1414 | else: |
|
1436 | 1415 | if follow and len(repo) > 0: |
|
1437 | 1416 | revs = repo.revs('reverse(:.)') |
@@ -1439,17 +1418,23 b' def getgraphlogrevs(repo, pats, opts):' | |||
|
1439 | 1418 | revs = list(repo.changelog) |
|
1440 | 1419 | revs.reverse() |
|
1441 | 1420 | if not revs: |
|
1442 | return iter([]), None, None | |
|
1421 | return [], None, None | |
|
1443 | 1422 | expr, filematcher = _makegraphlogrevset(repo, pats, opts, revs) |
|
1423 | if possiblyunsorted: | |
|
1424 | revs.sort(reverse=True) | |
|
1444 | 1425 | if expr: |
|
1426 | # Revset matchers often operate faster on revisions in changelog | |
|
1427 | # order, because most filters deal with the changelog. | |
|
1428 | revs.reverse() | |
|
1445 | 1429 | matcher = revset.match(repo.ui, expr) |
|
1446 | revs = increasingrevs(repo, revs, matcher) | |
|
1447 | if not opts.get('hidden'): | |
|
1448 | # --hidden is still experimental and not worth a dedicated revset | |
|
1449 | # yet. Fortunately, filtering revision number is fast. | |
|
1450 | revs = (r for r in revs if r not in repo.hiddenrevs) | |
|
1451 | else: | |
|
1452 | revs = iter(revs) | |
|
1430 | # Revset matches can reorder revisions. "A or B" typically | |
|
1431 | # returns the revision matching A then the revision matching B. Sort | |
|
1432 | # again to fix that. | |
|
1433 | revs = matcher(repo, revs) | |
|
1434 | revs.sort(reverse=True) | |
|
1435 | if limit is not None: | |
|
1436 | revs = revs[:limit] | |
|
1437 | ||
|
1453 | 1438 | return revs, expr, filematcher |
|
1454 | 1439 | |
|
1455 | 1440 | def displaygraph(ui, dag, displayer, showparents, edgefn, getrenamed=None, |
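
The new code above feeds the matcher revisions in changelog order for speed, then re-sorts because a matcher such as "A or B" may reorder its input. The shape of that dance, sketched (filterrevs is an illustrative name):

    def filterrevs(revs, matcher, limit=None):
        revs = sorted(revs)                  # matchers prefer changelog order
        revs = matcher(revs)                 # may return revs in its own order
        revs.sort(reverse=True)              # restore newest-first presentation
        if limit is not None:
            revs = revs[:limit]
        return revs

    evens = lambda rs: [r for r in rs if r % 2 == 0]
    assert filterrevs([5, 2, 8, 3], evens, limit=1) == [8]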
@@ -1484,10 +1469,6 b' def displaygraph(ui, dag, displayer, sho' | |||
|
1484 | 1469 | def graphlog(ui, repo, *pats, **opts): |
|
1485 | 1470 | # Parameters are identical to log command ones |
|
1486 | 1471 | revs, expr, filematcher = getgraphlogrevs(repo, pats, opts) |
|
1487 | revs = sorted(revs, reverse=1) | |
|
1488 | limit = loglimit(opts) | |
|
1489 | if limit is not None: | |
|
1490 | revs = revs[:limit] | |
|
1491 | 1472 | revdag = graphmod.dagwalker(repo, revs) |
|
1492 | 1473 | |
|
1493 | 1474 | getrenamed = None |
@@ -1534,7 +1515,7 b' def add(ui, repo, match, dryrun, listsub' | |||
|
1534 | 1515 | if ui.verbose or not exact: |
|
1535 | 1516 | ui.status(_('adding %s\n') % match.rel(join(f))) |
|
1536 | 1517 | |
|
1537 | for subpath in wctx.substate: | |
|
1518 | for subpath in sorted(wctx.substate): | |
|
1538 | 1519 | sub = wctx.sub(subpath) |
|
1539 | 1520 | try: |
|
1540 | 1521 | submatch = matchmod.narrowmatcher(subpath, match) |
@@ -1565,7 +1546,7 b' def forget(ui, repo, match, prefix, expl' | |||
|
1565 | 1546 | if explicitonly: |
|
1566 | 1547 | forget = [f for f in forget if match.exact(f)] |
|
1567 | 1548 | |
|
1568 | for subpath in wctx.substate: | |
|
1549 | for subpath in sorted(wctx.substate): | |
|
1569 | 1550 | sub = wctx.sub(subpath) |
|
1570 | 1551 | try: |
|
1571 | 1552 | submatch = matchmod.narrowmatcher(subpath, match) |
@@ -1762,9 +1743,10 b' def amend(ui, repo, commitfunc, old, ext' | |||
|
1762 | 1743 | # Move bookmarks from old parent to amend commit |
|
1763 | 1744 | bms = repo.nodebookmarks(old.node()) |
|
1764 | 1745 | if bms: |
|
1746 | marks = repo._bookmarks | |
|
1765 | 1747 | for bm in bms: |
|
1766 | repo._bookmarks[bm] = newid | |
|
1767 | bookmarks.write(repo) | |
|
1748 | marks[bm] = newid | |
|
1749 | marks.write() | |
|
1768 | 1750 | #commit the whole amend process |
|
1769 | 1751 | if obsolete._enabled and newid != old.node(): |
|
1770 | 1752 | # mark the new changeset as successor of the rewritten one |
@@ -1875,7 +1857,7 b' def revert(ui, repo, ctx, parents, *pats' | |||
|
1875 | 1857 | names[abs] = m.rel(abs), m.exact(abs) |
|
1876 | 1858 | |
|
1877 | 1859 | # get the list of subrepos that must be reverted |
|
1878 | targetsubs = [s for s in ctx.substate if m(s)] | |
|
1860 | targetsubs = sorted(s for s in ctx.substate if m(s)) | |
|
1879 | 1861 | m = scmutil.matchfiles(repo, names) |
|
1880 | 1862 | changes = repo.status(match=m)[:4] |
|
1881 | 1863 | modified, added, removed, deleted = map(set, changes) |
@@ -2015,12 +1997,12 b' def command(table):' | |||
|
2015 | 1997 | '''returns a function object bound to table which can be used as |
|
2016 | 1998 | a decorator for populating table as a command table''' |
|
2017 | 1999 | |
|
2018 | def cmd(name, options, synopsis=None): | |
|
2000 | def cmd(name, options=(), synopsis=None): | |
|
2019 | 2001 | def decorator(func): |
|
2020 | 2002 | if synopsis: |
|
2021 | table[name] = func, options, synopsis | |
|
2003 | table[name] = func, list(options), synopsis | |
|
2022 | 2004 | else: |
|
2023 | table[name] = func, options | |
|
2005 | table[name] = func, list(options) | |
|
2024 | 2006 | return func |
|
2025 | 2007 | return decorator |
|
2026 | 2008 |
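
cmd() above is a decorator factory that registers commands in a shared table; the change gives options a default and copies it to a list so a shared default tuple is never mutated. A runnable sketch of the pattern (table and greet are illustrative):

    table = {}

    def command(name, options=(), synopsis=None):
        def decorator(func):
            if synopsis:
                table[name] = func, list(options), synopsis
            else:
                table[name] = func, list(options)
            return func
        return decorator

    @command('greet', [('l', 'loud', False, 'shout')], 'greet NAME')
    def greet(name, **opts):
        msg = 'hello %s' % name
        return msg.upper() if opts.get('loud') else msg

    assert table['greet'][2] == 'greet NAME'
    assert greet('world', loud=True) == 'HELLO WORLD'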
@@ -49,6 +49,7 b' globalopts = [' | |||
|
49 | 49 | ('', 'profile', None, _('print command execution profile')), |
|
50 | 50 | ('', 'version', None, _('output version information and exit')), |
|
51 | 51 | ('h', 'help', None, _('display help and exit')), |
|
52 | ('', 'hidden', False, _('consider hidden changesets')), | |
|
52 | 53 | ] |
|
53 | 54 | |
|
54 | 55 | dryrunopts = [('n', 'dry-run', None, |
@@ -549,6 +550,10 b' def bisect(ui, repo, rev=None, extra=Non' | |||
|
549 | 550 | hg bisect --skip |
|
550 | 551 | hg bisect --skip 23 |
|
551 | 552 | |
|
553 | - skip all revisions that do not touch directories ``foo`` or ``bar``:: | |
|
554 | ||
|
555 | hg bisect --skip '!( file("path:foo") & file("path:bar") )' | |
|
556 | ||
|
552 | 557 | - forget the current bisection:: |
|
553 | 558 | |
|
554 | 559 | hg bisect --reset |
@@ -754,7 +759,7 b' def bisect(ui, repo, rev=None, extra=Non' | |||
|
754 | 759 | cmdutil.bailifchanged(repo) |
|
755 | 760 | return hg.clean(repo, node) |
|
756 | 761 | |
|
757 | @command('bookmarks', | |
|
762 | @command('bookmarks|bookmark', | |
|
758 | 763 | [('f', 'force', False, _('force')), |
|
759 | 764 | ('r', 'rev', '', _('revision'), _('REV')), |
|
760 | 765 | ('d', 'delete', False, _('delete a given bookmark')), |
@@ -821,7 +826,7 b' def bookmark(ui, repo, mark=None, rev=No' | |||
|
821 | 826 | if mark == repo._bookmarkcurrent: |
|
822 | 827 | bookmarks.setcurrent(repo, None) |
|
823 | 828 | del marks[mark] |
|
824 | bookmarks.write(repo) | |
|
829 | marks.write() | |
|
825 | 830 | |
|
826 | 831 | elif rename: |
|
827 | 832 | if mark is None: |
@@ -834,7 +839,7 b' def bookmark(ui, repo, mark=None, rev=No' | |||
|
834 | 839 | if repo._bookmarkcurrent == rename and not inactive: |
|
835 | 840 | bookmarks.setcurrent(repo, mark) |
|
836 | 841 | del marks[rename] |
|
837 | bookmarks.write(repo) | |
|
842 | marks.write() | |
|
838 | 843 | |
|
839 | 844 | elif mark is not None: |
|
840 | 845 | mark = checkformat(mark) |
@@ -848,7 +853,7 b' def bookmark(ui, repo, mark=None, rev=No' | |||
|
848 | 853 | marks[mark] = cur |
|
849 | 854 | if not inactive and cur == marks[mark]: |
|
850 | 855 | bookmarks.setcurrent(repo, mark) |
|
851 | bookmarks.write(repo) | |
|
856 | marks.write() | |
|
852 | 857 | |
|
853 | 858 | # Same message whether trying to deactivate the current bookmark (-i |
|
854 | 859 | # with no NAME) or listing bookmarks |
@@ -924,7 +929,7 b' def branch(ui, repo, label=None, **opts)' | |||
|
924 | 929 | ' exists'), |
|
925 | 930 | # i18n: "it" refers to an existing branch |
|
926 | 931 | hint=_("use 'hg update' to switch to it")) |
|
927 | scmutil.checknewlabel(repo, label) | |
|
932 | scmutil.checknewlabel(repo, label, 'branch') | |
|
928 | 933 | repo.dirstate.setbranch(label) |
|
929 | 934 | ui.status(_('marked working directory as branch %s\n') % label) |
|
930 | 935 | ui.status(_('(branches are permanent and global, ' |
@@ -1292,7 +1297,7 b' def commit(ui, repo, *pats, **opts):' | |||
|
1292 | 1297 | raise util.Abort(_('cannot amend merge changesets')) |
|
1293 | 1298 | if len(repo[None].parents()) > 1: |
|
1294 | 1299 | raise util.Abort(_('cannot amend while merging')) |
|
1295 | if old.children(): | |
|
1300 | if (not obsolete._enabled) and old.children(): | |
|
1296 | 1301 | raise util.Abort(_('cannot amend changeset with children')) |
|
1297 | 1302 | |
|
1298 | 1303 | e = cmdutil.commiteditor |
@@ -1322,11 +1327,12 b' def commit(ui, repo, *pats, **opts):' | |||
|
1322 | 1327 | elif marks: |
|
1323 | 1328 | ui.debug('moving bookmarks %r from %s to %s\n' % |
|
1324 | 1329 | (marks, old.hex(), hex(node))) |
|
1330 | newmarks = repo._bookmarks | |
|
1325 | 1331 | for bm in marks: |
|
1326 | repo._bookmarks[bm] = node | |
|
1332 | newmarks[bm] = node | |
|
1327 | 1333 | if bm == current: |
|
1328 | 1334 | bookmarks.setcurrent(repo, bm) |
|
1329 | bookmarks.write(repo) | |
|
1335 | newmarks.write() | |
|
1330 | 1336 | else: |
|
1331 | 1337 | e = cmdutil.commiteditor |
|
1332 | 1338 | if opts.get('force_editor'): |
@@ -1513,7 +1519,7 b' def debugbuilddag(ui, repo, text=None,' | |||
|
1513 | 1519 | ui.progress(_('building'), id, unit=_('revisions'), total=total) |
|
1514 | 1520 | for type, data in dagparser.parsedag(text): |
|
1515 | 1521 | if type == 'n': |
|
1516 | ui.note('node %s\n' % str(data)) | |
|
1522 | ui.note(('node %s\n' % str(data))) | |
|
1517 | 1523 | id, ps = data |
|
1518 | 1524 | |
|
1519 | 1525 | files = [] |
@@ -1526,7 +1532,8 b' def debugbuilddag(ui, repo, text=None,' | |||
|
1526 | 1532 | if len(ps) > 1: |
|
1527 | 1533 | p2 = repo[ps[1]] |
|
1528 | 1534 | pa = p1.ancestor(p2) |
|
1529 | base, local, other = [x[fn].data() for x in pa, p1, p2] | |
|
1535 | base, local, other = [x[fn].data() for x in (pa, p1, | |
|
1536 | p2)] | |
|
1530 | 1537 | m3 = simplemerge.Merge3Text(base, local, other) |
|
1531 | 1538 | ml = [l.strip() for l in m3.merge_lines()] |
|
1532 | 1539 | ml.append("") |
@@ -1574,10 +1581,10 b' def debugbuilddag(ui, repo, text=None,' | |||
|
1574 | 1581 | at = id |
|
1575 | 1582 | elif type == 'l': |
|
1576 | 1583 | id, name = data |
|
1577 | ui.note('tag %s\n' % name) | |
|
1584 | ui.note(('tag %s\n' % name)) | |
|
1578 | 1585 | tags.append("%s %s\n" % (hex(repo.changelog.node(id)), name)) |
|
1579 | 1586 | elif type == 'a': |
|
1580 | ui.note('branch %s\n' % data) | |
|
1587 | ui.note(('branch %s\n' % data)) | |
|
1581 | 1588 | atbranch = data |
|
1582 | 1589 | ui.progress(_('building'), id, unit=_('revisions'), total=total) |
|
1583 | 1590 | tr.close() |
@@ -1595,7 +1602,7 b' def debugbundle(ui, bundlepath, all=None' | |||
|
1595 | 1602 | try: |
|
1596 | 1603 | gen = changegroup.readbundle(f, bundlepath) |
|
1597 | 1604 | if all: |
|
1598 | ui.write("format: id, p1, p2, cset, delta base, len(delta)\n") | |
|
1605 | ui.write(("format: id, p1, p2, cset, delta base, len(delta)\n")) | |
|
1599 | 1606 | |
|
1600 | 1607 | def showchunks(named): |
|
1601 | 1608 | ui.write("\n%s\n" % named) |
@@ -1787,11 +1794,11 b' def debugdate(ui, date, range=None, **op' | |||
|
1787 | 1794 | d = util.parsedate(date, util.extendeddateformats) |
|
1788 | 1795 | else: |
|
1789 | 1796 | d = util.parsedate(date) |
|
1790 | ui.write("internal: %s %s\n" % d) | |
|
1791 | ui.write("standard: %s\n" % util.datestr(d)) | |
|
1797 | ui.write(("internal: %s %s\n") % d) | |
|
1798 | ui.write(("standard: %s\n") % util.datestr(d)) | |
|
1792 | 1799 | if range: |
|
1793 | 1800 | m = util.matchdate(range) |
|
1794 | ui.write("match: %s\n" % m(d[0])) | |
|
1801 | ui.write(("match: %s\n") % m(d[0])) | |
|
1795 | 1802 | |
|
1796 | 1803 | @command('debugdiscovery', |
|
1797 | 1804 | [('', 'old', None, _('use old-style discovery')), |
@@ -1821,8 +1828,8 b' def debugdiscovery(ui, repo, remoteurl="' | |||
|
1821 | 1828 | force=True) |
|
1822 | 1829 | common = set(common) |
|
1823 | 1830 | if not opts.get('nonheads'): |
|
1824 | ui.write("unpruned common: %s\n" % | |
|
1825 | " ".join(sorted(short(n) for n in common))) | |
|
1831 | ui.write(("unpruned common: %s\n") % | |
|
1832 | " ".join(sorted(short(n) for n in common))) | |
|
1826 | 1833 | dag = dagutil.revlogdag(repo.changelog) |
|
1827 | 1834 | all = dag.ancestorset(dag.internalizeall(common)) |
|
1828 | 1835 | common = dag.externalizeall(dag.headsetofconnecteds(all)) |
@@ -1831,11 +1838,12 b' def debugdiscovery(ui, repo, remoteurl="' | |||
|
1831 | 1838 | common = set(common) |
|
1832 | 1839 | rheads = set(hds) |
|
1833 | 1840 | lheads = set(repo.heads()) |
|
1834 | ui.write("common heads: %s\n" % " ".join(sorted(short(n) for n in common))) | |
|
1841 | ui.write(("common heads: %s\n") % | |
|
1842 | " ".join(sorted(short(n) for n in common))) | |
|
1835 | 1843 | if lheads <= common: |
|
1836 | ui.write("local is subset\n") | |
|
1844 | ui.write(("local is subset\n")) | |
|
1837 | 1845 | elif rheads <= common: |
|
1838 | ui.write("remote is subset\n") | |
|
1846 | ui.write(("remote is subset\n")) | |
|
1839 | 1847 | |
|
1840 | 1848 | serverlogs = opts.get('serverlog') |
|
1841 | 1849 | if serverlogs: |
@@ -1879,9 +1887,9 b' def debugfileset(ui, repo, expr, **opts)' | |||
|
1879 | 1887 | def debugfsinfo(ui, path = "."): |
|
1880 | 1888 | """show information detected about current filesystem""" |
|
1881 | 1889 | util.writefile('.debugfsinfo', '') |
|
1882 | ui.write('exec: %s\n' % (util.checkexec(path) and 'yes' or 'no')) | |
|
1883 | ui.write('symlink: %s\n' % (util.checklink(path) and 'yes' or 'no')) | |
|
1884 | ui.write('case-sensitive: %s\n' % (util.checkcase('.debugfsinfo') | |
|
1890 | ui.write(('exec: %s\n') % (util.checkexec(path) and 'yes' or 'no')) | |
|
1891 | ui.write(('symlink: %s\n') % (util.checklink(path) and 'yes' or 'no')) | |
|
1892 | ui.write(('case-sensitive: %s\n') % (util.checkcase('.debugfsinfo') | |
|
1885 | 1893 | and 'yes' or 'no')) |
|
1886 | 1894 | os.unlink('.debugfsinfo') |
|
1887 | 1895 | |
@@ -1979,7 +1987,7 b' def debugindexdot(ui, repo, file_):' | |||
|
1979 | 1987 | r = filelog |
|
1980 | 1988 | if not r: |
|
1981 | 1989 | r = revlog.revlog(scmutil.opener(os.getcwd(), audit=False), file_) |
|
1982 | ui.write("digraph G {\n") | |
|
1990 | ui.write(("digraph G {\n")) | |
|
1983 | 1991 | for i in r: |
|
1984 | 1992 | node = r.node(i) |
|
1985 | 1993 | pp = r.parents(node) |
@@ -2128,7 +2136,8 b' def debugobsolete(ui, repo, precursor=No' | |||
|
2128 | 2136 | ui.write(' ') |
|
2129 | 2137 | ui.write(hex(repl)) |
|
2130 | 2138 | ui.write(' %X ' % m._data[2]) |
|
2131 | ui.write(m.metadata()) | |
|
2139 | ui.write('{%s}' % (', '.join('%r: %r' % t for t in | |
|
2140 | sorted(m.metadata().items())))) | |
|
2132 | 2141 | ui.write('\n') |
|
2133 | 2142 | |
|
2134 | 2143 | @command('debugpushkey', [], _('REPO NAMESPACE [KEY OLD NEW]')) |
@@ -2148,7 +2157,7 b' def debugpushkey(ui, repopath, namespace' | |||
|
2148 | 2157 | ui.status(str(r) + '\n') |
|
2149 | 2158 | return not r |
|
2150 | 2159 | else: |
|
2151 | for k, v in target.listkeys(namespace).iteritems(): | |
|
2160 | for k, v in sorted(target.listkeys(namespace).iteritems()): | |
|
2152 | 2161 | ui.write("%s\t%s\n" % (k.encode('string-escape'), |
|
2153 | 2162 | v.encode('string-escape'))) |
|
2154 | 2163 | |
@@ -2325,52 +2334,54 b' def debugrevlog(ui, repo, file_ = None, ' | |||
|
2325 | 2334 | def pcfmt(value, total): |
|
2326 | 2335 | return (value, 100 * float(value) / total) |
|
2327 | 2336 | |
|
2328 | ui.write('format : %d\n' % format) | |
|
2329 | ui.write('flags : %s\n' % ', '.join(flags)) | |
|
2337 | ui.write(('format : %d\n') % format) | |
|
2338 | ui.write(('flags : %s\n') % ', '.join(flags)) | |
|
2330 | 2339 | |
|
2331 | 2340 | ui.write('\n') |
|
2332 | 2341 | fmt = pcfmtstr(totalsize) |
|
2333 | 2342 | fmt2 = dfmtstr(totalsize) |
|
2334 | ui.write('revisions : ' + fmt2 % numrevs) | |
|
2335 | ui.write(' merges : ' + fmt % pcfmt(nummerges, numrevs)) | |
|
2336 | ui.write(' normal : ' + fmt % pcfmt(numrevs - nummerges, numrevs)) | |
|
2337 | ui.write('revisions : ' + fmt2 % numrevs) | |
|
2338 | ui.write(' full : ' + fmt % pcfmt(numfull, numrevs)) | |
|
2339 | ui.write(' deltas : ' + fmt % pcfmt(numdeltas, numrevs)) | |
|
2340 | ui.write('revision size : ' + fmt2 % totalsize) | |
|
2341 | ui.write(' full : ' + fmt % pcfmt(fulltotal, totalsize)) | |
|
2342 | ui.write(' deltas : ' + fmt % pcfmt(deltatotal, totalsize)) | |
|
2343 | ui.write(('revisions : ') + fmt2 % numrevs) | |
|
2344 | ui.write((' merges : ') + fmt % pcfmt(nummerges, numrevs)) | |
|
2345 | ui.write((' normal : ') + fmt % pcfmt(numrevs - nummerges, numrevs)) | |
|
2346 | ui.write(('revisions : ') + fmt2 % numrevs) | |
|
2347 | ui.write((' full : ') + fmt % pcfmt(numfull, numrevs)) | |
|
2348 | ui.write((' deltas : ') + fmt % pcfmt(numdeltas, numrevs)) | |
|
2349 | ui.write(('revision size : ') + fmt2 % totalsize) | |
|
2350 | ui.write((' full : ') + fmt % pcfmt(fulltotal, totalsize)) | |
|
2351 | ui.write((' deltas : ') + fmt % pcfmt(deltatotal, totalsize)) | |
|
2343 | 2352 | |
|
2344 | 2353 | ui.write('\n') |
|
2345 | 2354 | fmt = dfmtstr(max(avgchainlen, compratio)) |
|
2346 | ui.write('avg chain length : ' + fmt % avgchainlen) | |
|
2347 | ui.write('compression ratio : ' + fmt % compratio) | |
|
2355 | ui.write(('avg chain length : ') + fmt % avgchainlen) | |
|
2356 | ui.write(('compression ratio : ') + fmt % compratio) | |
|
2348 | 2357 | |
|
2349 | 2358 | if format > 0: |
|
2350 | 2359 | ui.write('\n') |
|
2351 | ui.write('uncompressed data size (min/max/avg) : %d / %d / %d\n' | |
|
2360 | ui.write(('uncompressed data size (min/max/avg) : %d / %d / %d\n') | |
|
2352 | 2361 | % tuple(datasize)) |
|
2353 | ui.write('full revision size (min/max/avg) : %d / %d / %d\n' | |
|
2362 | ui.write(('full revision size (min/max/avg) : %d / %d / %d\n') | |
|
2354 | 2363 | % tuple(fullsize)) |
|
2355 | ui.write('delta size (min/max/avg) : %d / %d / %d\n' | |
|
2364 | ui.write(('delta size (min/max/avg) : %d / %d / %d\n') | |
|
2356 | 2365 | % tuple(deltasize)) |
|
2357 | 2366 | |
|
2358 | 2367 | if numdeltas > 0: |
|
2359 | 2368 | ui.write('\n') |
|
2360 | 2369 | fmt = pcfmtstr(numdeltas) |
|
2361 | 2370 | fmt2 = pcfmtstr(numdeltas, 4) |
|
2362 | ui.write('deltas against prev : ' + fmt % pcfmt(numprev, numdeltas)) | |
|
2371 | ui.write(('deltas against prev : ') + fmt % pcfmt(numprev, numdeltas)) | |
|
2363 | 2372 | if numprev > 0: |
|
2364 | ui.write(' where prev = p1 : ' + fmt2 % pcfmt(nump1prev, | |
|
2373 | ui.write((' where prev = p1 : ') + fmt2 % pcfmt(nump1prev, | |
|
2365 | 2374 | numprev)) |
|
2366 | ui.write(' where prev = p2 : ' + fmt2 % pcfmt(nump2prev, | |
|
2375 | ui.write((' where prev = p2 : ') + fmt2 % pcfmt(nump2prev, | |
|
2367 | 2376 | numprev)) |
|
2368 | ui.write(' other : ' + fmt2 % pcfmt(numoprev, | |
|
2377 | ui.write((' other : ') + fmt2 % pcfmt(numoprev, | |
|
2369 | 2378 | numprev)) |
|
2370 | 2379 | if gdelta: |
|
2371 | ui.write('deltas against p1 : ' + fmt % pcfmt(nump1, numdeltas)) | |
|
2372 | ui.write('deltas against p2 : ' + fmt % pcfmt(nump2, numdeltas)) | |
|
2373 | ui.write('deltas against other : ' + fmt % pcfmt(numother, | |
|
2380 | ui.write(('deltas against p1 : ') | |
|
2381 | + fmt % pcfmt(nump1, numdeltas)) | |
|
2382 | ui.write(('deltas against p2 : ') | |
|
2383 | + fmt % pcfmt(nump2, numdeltas)) | |
|
2384 | ui.write(('deltas against other : ') + fmt % pcfmt(numother, | |
|
2374 | 2385 | numdeltas)) |
|
2375 | 2386 | |
|
2376 | 2387 | @command('debugrevspec', [], ('REVSPEC')) |
@@ -2448,9 +2459,63 b' def debugstate(ui, repo, nodates=None, d' | |||
|
2448 | 2459 | def debugsub(ui, repo, rev=None): |
|
2449 | 2460 | ctx = scmutil.revsingle(repo, rev, None) |
|
2450 | 2461 | for k, v in sorted(ctx.substate.items()): |
|
2451 | ui.write('path %s\n' % k) | |
|
2452 | ui.write(' source %s\n' % v[0]) | |
|
2453 | ui.write(' revision %s\n' % v[1]) | |
|
2462 | ui.write(('path %s\n') % k) | |
|
2463 | ui.write((' source %s\n') % v[0]) | |
|
2464 | ui.write((' revision %s\n') % v[1]) | |
|
2465 | ||
|
2466 | @command('debugsuccessorssets', | |
|
2467 | [], | |
|
2468 | _('[REV]')) | |
|
2469 | def debugsuccessorssets(ui, repo, *revs): | |
|
2470 | """show set of successors for revision | |
|
2471 | ||
|
2472 | A successors set of changeset A is a consistent group of revisions that | |
|
2473 | succeed A. It contains non-obsolete changesets only. | |
|
2474 | ||
|
2475 | In most cases a changeset A has a single successors set containing a single | |
|
2476 | successor (changeset A replaced by A'). | |
|
2477 | ||
|
2478 | A changeset that is made obsolete with no successors is called "pruned". | |
|
2479 | Such changesets have no successors sets at all. | |
|
2480 | ||
|
2481 | A changeset that has been "split" will have a successors set containing | |
|
2482 | more than one successor. | |
|
2483 | ||
|
2484 | A changeset that has been rewritten in multiple different ways is called | |
|
2485 | "divergent". Such changesets have multiple successor sets (each of which | |
|
2486 | may also be split, i.e. have multiple successors). | |
|
2487 | ||
|
2488 | Results are displayed as follows:: | |
|
2489 | ||
|
2490 | <rev1> | |
|
2491 | <successors-1A> | |
|
2492 | <rev2> | |
|
2493 | <successors-2A> | |
|
2494 | <successors-2B1> <successors-2B2> <successors-2B3> | |
|
2495 | ||
|
2496 | Here rev2 has two possible (i.e. divergent) successors sets. The first | |
|
2497 | holds one element, whereas the second holds three (i.e. the changeset has | |
|
2498 | been split). | |
|
2499 | """ | |
|
2500 | # passed to successorssets caching computation from one call to another | |
|
2501 | cache = {} | |
|
2502 | ctx2str = str | |
|
2503 | node2str = short | |
|
2504 | if ui.debug(): | |
|
2505 | def ctx2str(ctx): | |
|
2506 | return ctx.hex() | |
|
2507 | node2str = hex | |
|
2508 | for rev in scmutil.revrange(repo, revs): | |
|
2509 | ctx = repo[rev] | |
|
2510 | ui.write('%s\n'% ctx2str(ctx)) | |
|
2511 | for succsset in obsolete.successorssets(repo, ctx.node(), cache): | |
|
2512 | if succsset: | |
|
2513 | ui.write(' ') | |
|
2514 | ui.write(node2str(succsset[0])) | |
|
2515 | for node in succsset[1:]: | |
|
2516 | ui.write(' ') | |
|
2517 | ui.write(node2str(node)) | |
|
2518 | ui.write('\n') | |
|
2454 | 2519 | |
|
2455 | 2520 | @command('debugwalk', walkopts, _('[OPTION]... [FILE]...')) |
|
2456 | 2521 | def debugwalk(ui, repo, *pats, **opts): |
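
A toy rendering of the data shape the docstring above describes, with plain strings standing in for nodes (entirely illustrative):

    succsets = {
        'a': [['a1'], ['a2', 'a3']],  # divergent: two sets, the second a split
        'b': [],                      # pruned: obsolete with no successors
        'c': [['c1']],                # the common case: one set, one successor
    }
    assert len(succsets['a']) == 2 and succsets['b'] == []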
@@ -2823,13 +2888,27 b' def graft(ui, repo, *revs, **opts):' | |||
|
2823 | 2888 | |
|
2824 | 2889 | wlock = repo.wlock() |
|
2825 | 2890 | try: |
|
2891 | current = repo['.'] | |
|
2826 | 2892 | for pos, ctx in enumerate(repo.set("%ld", revs)): |
|
2827 | current = repo['.'] | |
|
2828 | 2893 | |
|
2829 | 2894 | ui.status(_('grafting revision %s\n') % ctx.rev()) |
|
2830 | 2895 | if opts.get('dry_run'): |
|
2831 | 2896 | continue |
|
2832 | 2897 | |
|
2898 | source = ctx.extra().get('source') | |
|
2899 | if not source: | |
|
2900 | source = ctx.hex() | |
|
2901 | extra = {'source': source} | |
|
2902 | user = ctx.user() | |
|
2903 | if opts.get('user'): | |
|
2904 | user = opts['user'] | |
|
2905 | date = ctx.date() | |
|
2906 | if opts.get('date'): | |
|
2907 | date = opts['date'] | |
|
2908 | message = ctx.description() | |
|
2909 | if opts.get('log'): | |
|
2910 | message += '\n(grafted from %s)' % ctx.hex() | |
|
2911 | ||
|
2833 | 2912 | # we don't merge the first commit when continuing |
|
2834 | 2913 | if not cont: |
|
2835 | 2914 | # perform the graft merge with p1(rev) as 'ancestor' |
@@ -2858,29 +2937,18 b' def graft(ui, repo, *revs, **opts):' | |||
|
2858 | 2937 | cmdutil.duplicatecopies(repo, ctx.rev(), ctx.p1().rev()) |
|
2859 | 2938 | |
|
2860 | 2939 | # commit |
|
2861 | source = ctx.extra().get('source') | |
|
2862 | if not source: | |
|
2863 | source = ctx.hex() | |
|
2864 | extra = {'source': source} | |
|
2865 | user = ctx.user() | |
|
2866 | if opts.get('user'): | |
|
2867 | user = opts['user'] | |
|
2868 | date = ctx.date() | |
|
2869 | if opts.get('date'): | |
|
2870 | date = opts['date'] | |
|
2871 | message = ctx.description() | |
|
2872 | if opts.get('log'): | |
|
2873 | message += '\n(grafted from %s)' % ctx.hex() | |
|
2874 | 2940 | node = repo.commit(text=message, user=user, |
|
2875 | 2941 | date=date, extra=extra, editor=editor) |
|
2876 | 2942 | if node is None: |
|
2877 | 2943 | ui.status(_('graft for revision %s is empty\n') % ctx.rev()) |
|
2944 | else: | |
|
2945 | current = repo[node] | |
|
2878 | 2946 | finally: |
|
2879 | 2947 | wlock.release() |
|
2880 | 2948 | |
|
2881 | 2949 | # remove state when we complete successfully |
|
2882 | if not opts.get('dry_run') and os.path.exists(repo.join('graftstate')): | |
|
2883 | util.unlinkpath(repo.join('graftstate')) | |
|
2950 | if not opts.get('dry_run'): | |
|
2951 | util.unlinkpath(repo.join('graftstate'), ignoremissing=True) | |
|
2884 | 2952 | |
|
2885 | 2953 | return 0 |
|
2886 | 2954 | |
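
The cleanup above switches from an existence check to unlinkpath(..., ignoremissing=True): remove the file but tolerate it already being gone. A sketch of that idiom in the codebase's Python 2 style (an assumed shape; the remove() hunk further below adopts the same call):

    import errno, os

    def unlinkpath(path, ignoremissing=False):
        try:
            os.unlink(path)
        except OSError, inst:
            if not (ignoremissing and inst.errno == errno.ENOENT):
                raise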
@@ -3564,7 +3632,7 b' def identify(ui, repo, source=None, rev=' | |||
|
3564 | 3632 | bms = [bm for bm, bmr in peer.listkeys('bookmarks').iteritems() |
|
3565 | 3633 | if bmr == hexremoterev] |
|
3566 | 3634 | |
|
3567 | return bms | |
|
3635 | return sorted(bms) | |
|
3568 | 3636 | |
|
3569 | 3637 | if bookmarks: |
|
3570 | 3638 | output.extend(getbms()) |
@@ -4024,7 +4092,6 b' def locate(ui, repo, *pats, **opts):' | |||
|
4024 | 4092 | _('show changesets within the given named branch'), _('BRANCH')), |
|
4025 | 4093 | ('P', 'prune', [], |
|
4026 | 4094 | _('do not display revision or any of its ancestors'), _('REV')), |
|
4027 | ('', 'hidden', False, _('show hidden changesets (DEPRECATED)')), | |
|
4028 | 4095 | ] + logopts + walkopts, |
|
4029 | 4096 | _('[OPTION]... [FILE]')) |
|
4030 | 4097 | def log(ui, repo, *pats, **opts): |
@@ -4140,8 +4207,6 b' def log(ui, repo, *pats, **opts):' | |||
|
4140 | 4207 | return |
|
4141 | 4208 | if opts.get('branch') and ctx.branch() not in opts['branch']: |
|
4142 | 4209 | return |
|
4143 | if not opts.get('hidden') and ctx.hidden(): | |
|
4144 | return | |
|
4145 | 4210 | if df and not df(ctx.date()[0]): |
|
4146 | 4211 | return |
|
4147 | 4212 | |
@@ -4207,6 +4272,9 b' def manifest(ui, repo, node=None, rev=No' | |||
|
4207 | 4272 | |
|
4208 | 4273 | Returns 0 on success. |
|
4209 | 4274 | """ |
|
4275 | ||
|
4276 | fm = ui.formatter('manifest', opts) | |
|
4277 | ||
|
4210 | 4278 | if opts.get('all'): |
|
4211 | 4279 | if rev or node: |
|
4212 | 4280 | raise util.Abort(_("can't specify a revision with --all")) |
@@ -4224,7 +4292,9 b' def manifest(ui, repo, node=None, rev=No' | |||
|
4224 | 4292 | finally: |
|
4225 | 4293 | lock.release() |
|
4226 | 4294 | for f in res: |
|
4227 | ui.write("%s\n" % f) | |
|
4295 | fm.startitem() | |
|
4296 | fm.write("path", '%s\n', f) | |
|
4297 | fm.end() | |
|
4228 | 4298 | return |
|
4229 | 4299 | |
|
4230 | 4300 | if rev and node: |
@@ -4233,14 +4303,17 b' def manifest(ui, repo, node=None, rev=No' | |||
|
4233 | 4303 | if not node: |
|
4234 | 4304 | node = rev |
|
4235 | 4305 | |
|
4236 | decor = {'l': '644 @ ', 'x': '755 * ', '': '644 '} | |
|
4306 | char = {'l': '@', 'x': '*', '': ''} | |
|
4307 | mode = {'l': '644', 'x': '755', '': '644'} | |
|
4237 | 4308 | ctx = scmutil.revsingle(repo, node) |
|
4309 | mf = ctx.manifest() | |
|
4238 | 4310 | for f in ctx: |
|
4239 | if ui.debugflag: | |
|
4240 | ui.write("%40s " % hex(ctx.manifest()[f])) | |
|
4241 | if ui.verbose: | |
|
4242 | ui.write(decor[ctx.flags(f)]) | |
|
4243 | ui.write("%s\n" % f) | |
|
4311 | fm.startitem() | |
|
4312 | fl = ctx[f].flags() | |
|
4313 | fm.condwrite(ui.debugflag, 'hash', '%s ', hex(mf[f])) | |
|
4314 | fm.condwrite(ui.verbose, 'mode type', '%s %1s ', mode[fl], char[fl]) | |
|
4315 | fm.write('path', '%s\n', f) | |
|
4316 | fm.end() | |
|
4244 | 4317 | |
|
4245 | 4318 | @command('^merge', |
|
4246 | 4319 | [('f', 'force', None, _('force a merge with outstanding changes')), |
@@ -4556,10 +4629,14 b' def phase(ui, repo, *revs, **opts):' | |||
|
4556 | 4629 | phases.retractboundary(repo, targetphase, nodes) |
|
4557 | 4630 | finally: |
|
4558 | 4631 | lock.release() |
|
4559 | newdata = repo._phasecache.getphaserevs(repo) | |
|
4632 | # moving revision from public to draft may hide them | |
|
4633 | # We have to check result on an unfiltered repository | |
|
4634 | unfi = repo.unfiltered() | |
|
4635 | newdata = repo._phasecache.getphaserevs(unfi) | |
|
4560 | 4636 | changes = sum(o != newdata[i] for i, o in enumerate(olddata)) |
|
4637 | cl = unfi.changelog | |
|
4561 | 4638 | rejected = [n for n in nodes |
|
4562 | if newdata[repo[n].rev()] < targetphase] | |
|
4639 | if newdata[cl.rev(n)] < targetphase] | |
|
4563 | 4640 | if rejected: |
|
4564 | 4641 | ui.warn(_('cannot move %i changesets to a more permissive ' |
|
4565 | 4642 | 'phase, use --force\n') % len(rejected)) |
@@ -4666,11 +4743,12 b' def pull(ui, repo, source="default", **o' | |||
|
4666 | 4743 | |
|
4667 | 4744 | # update specified bookmarks |
|
4668 | 4745 | if opts.get('bookmark'): |
|
4746 | marks = repo._bookmarks | |
|
4669 | 4747 | for b in opts['bookmark']: |
|
4670 | 4748 | # explicit pull overrides local bookmark if any |
|
4671 | 4749 | ui.status(_("importing bookmark %s\n") % b) |
|
4672 | repo._bookmarks[b] = repo[rb[b]].node() | |
|
4673 | bookmarks.write(repo) | |
|
4750 | marks[b] = repo[rb[b]].node() | |
|
4751 | marks.write() | |
|
4674 | 4752 | |
|
4675 | 4753 | return ret |
|
4676 | 4754 | |
@@ -4861,8 +4939,7 b' def remove(ui, repo, *pats, **opts):' | |||
|
4861 | 4939 | elif after: |
|
4862 | 4940 | list = deleted |
|
4863 | 4941 | for f in modified + added + clean: |
|
4864 | ui.warn(_('not removing %s: file still exists (use -f' | |
|
4865 | ' to force removal)\n') % m.rel(f)) | |
|
4942 | ui.warn(_('not removing %s: file still exists\n') % m.rel(f)) | |
|
4866 | 4943 | ret = 1 |
|
4867 | 4944 | else: |
|
4868 | 4945 | list = deleted + clean |
@@ -4885,11 +4962,7 b' def remove(ui, repo, *pats, **opts):' | |||
|
4885 | 4962 | for f in list: |
|
4886 | 4963 | if f in added: |
|
4887 | 4964 | continue # we never unlink added files on remove |
|
4888 | try: | |
|
4889 | util.unlinkpath(repo.wjoin(f)) | |
|
4890 | except OSError, inst: | |
|
4891 | if inst.errno != errno.ENOENT: | |
|
4892 | raise | |
|
4965 | util.unlinkpath(repo.wjoin(f), ignoremissing=True) | |
|
4893 | 4966 | repo[None].forget(list) |
|
4894 | 4967 | finally: |
|
4895 | 4968 | wlock.release() |
@@ -5427,17 +5500,16 b' def status(ui, repo, *pats, **opts):' | |||
|
5427 | 5500 | copy = copies.pathcopies(repo[node1], repo[node2]) |
|
5428 | 5501 | |
|
5429 | 5502 | fm = ui.formatter('status', opts) |
|
5430 | format = '%s %s' + end | |
|
5431 | if opts.get('no_status'): | |
|
5432 | format = '%.0s%s' + end | |
|
5503 | fmt = '%s' + end | |
|
5504 | showchar = not opts.get('no_status') | |
|
5433 | 5505 | |
|
5434 | 5506 | for state, char, files in changestates: |
|
5435 | 5507 | if state in show: |
|
5436 | 5508 | label = 'status.' + state |
|
5437 | 5509 | for f in files: |
|
5438 | 5510 | fm.startitem() |
|
5439 | fm.write(state, format, char, | |
|
5440 | repo.pathto(f, cwd), label=label) | |
|
5511 | fm.condwrite(showchar, 'status', '%s ', char, label=label) | |
|
5512 | fm.write('path', fmt, repo.pathto(f, cwd), label=label) | |
|
5441 | 5513 | if f in copy: |
|
5442 | 5514 | fm.write("copy", ' %s' + end, repo.pathto(copy[f], cwd), |
|
5443 | 5515 | label='status.copied') |
@@ -5743,7 +5815,7 b' def tag(ui, repo, name1, *names, **opts)' | |||
|
5743 | 5815 | release(lock, wlock) |
|
5744 | 5816 | |
|
5745 | 5817 | @command('tags', [], '') |
|
5746 | def tags(ui, repo): | |
|
5818 | def tags(ui, repo, **opts): | |
|
5747 | 5819 | """list repository tags |
|
5748 | 5820 | |
|
5749 | 5821 | This lists both regular and local tags. When the -v/--verbose |
@@ -5752,27 +5824,27 b' def tags(ui, repo):' | |||
|
5752 | 5824 | Returns 0 on success. |
|
5753 | 5825 | """ |
|
5754 | 5826 | |
|
5827 | fm = ui.formatter('tags', opts) | |
|
5755 | 5828 | hexfunc = ui.debugflag and hex or short |
|
5756 | 5829 | tagtype = "" |
|
5757 | 5830 | |
|
5758 | 5831 | for t, n in reversed(repo.tagslist()): |
|
5759 | if ui.quiet: | |
|
5760 | ui.write("%s\n" % t, label='tags.normal') | |
|
5761 | continue | |
|
5762 | ||
|
5763 | 5832 | hn = hexfunc(n) |
|
5764 | r = "%5d:%s" % (repo.changelog.rev(n), hn) | |
|
5765 | rev = ui.label(r, 'log.changeset changeset.%s' % repo[n].phasestr()) | |
|
5766 | spaces = " " * (30 - encoding.colwidth(t)) | |
|
5767 | ||
|
5768 | tag = ui.label(t, 'tags.normal') | |
|
5769 | if ui.verbose: | |
|
5770 | if repo.tagtype(t) == 'local': | |
|
5771 | tagtype = " local" | |
|
5772 | tag = ui.label(t, 'tags.local') | |
|
5773 | else: | |
|
5774 | tagtype = "" | |
|
5775 | ui.write("%s%s %s%s\n" % (tag, spaces, rev, tagtype)) | |
|
5833 | label = 'tags.normal' | |
|
5834 | tagtype = '' | |
|
5835 | if repo.tagtype(t) == 'local': | |
|
5836 | label = 'tags.local' | |
|
5837 | tagtype = 'local' | |
|
5838 | ||
|
5839 | fm.startitem() | |
|
5840 | fm.write('tag', '%s', t, label=label) | |
|
5841 | fmt = " " * (30 - encoding.colwidth(t)) + ' %5d:%s' | |
|
5842 | fm.condwrite(not ui.quiet, 'rev id', fmt, | |
|
5843 | repo.changelog.rev(n), hn, label=label) | |
|
5844 | fm.condwrite(ui.verbose and tagtype, 'type', ' %s', | |
|
5845 | tagtype, label=label) | |
|
5846 | fm.plain('\n') | |
|
5847 | fm.end() | |
|
5776 | 5848 | |
|
5777 | 5849 | @command('tip', |
|
5778 | 5850 | [('p', 'patch', None, _('show patch')), |
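
tags() now routes all output through ui.formatter, the same pattern the status and manifest hunks adopt. The calling convention it settles on, sketched with only the calls visible in this diff (showitems and the field names are illustrative):

    def showitems(ui, opts, items):
        fm = ui.formatter('items', opts)
        for name, value in items:
            fm.startitem()                                   # one record per row
            fm.write('name', '%s', name)                     # always emitted
            fm.condwrite(ui.verbose, 'value', ' %s', value)  # only when verbose
            fm.plain('\n')                                   # raw text, not data
        fm.end()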
@@ -42,7 +42,7 b' class channeledoutput(object):' | |||
|
42 | 42 | |
|
43 | 43 | def __getattr__(self, attr): |
|
44 | 44 | if attr in ('isatty', 'fileno'): |
|
45 | raise AttributeError | |
|
45 | raise AttributeError(attr) | |
|
46 | 46 | return getattr(self.in_, attr) |
|
47 | 47 | |
|
48 | 48 | class channeledinput(object): |
@@ -122,7 +122,7 b' class channeledinput(object):' | |||
|
122 | 122 | |
|
123 | 123 | def __getattr__(self, attr): |
|
124 | 124 | if attr in ('isatty', 'fileno'): |
|
125 | raise AttributeError | |
|
125 | raise AttributeError(attr) | |
|
126 | 126 | return getattr(self.in_, attr) |
|
127 | 127 | |
|
128 | 128 | class server(object): |
@@ -220,7 +220,7 b' class server(object):' | |||
|
220 | 220 | 'getencoding' : getencoding} |
|
221 | 221 | |
|
222 | 222 | def serve(self): |
|
223 | hellomsg = 'capabilities: ' + ' '.join(self.capabilities) | |
|
223 | hellomsg = 'capabilities: ' + ' '.join(sorted(self.capabilities)) | |
|
224 | 224 | hellomsg += '\n' |
|
225 | 225 | hellomsg += 'encoding: ' + encoding.encoding |
|
226 | 226 |
@@ -12,6 +12,7 b' import copies' | |||
|
12 | 12 | import match as matchmod |
|
13 | 13 | import os, errno, stat |
|
14 | 14 | import obsolete as obsmod |
|
15 | import repoview | |
|
15 | 16 | |
|
16 | 17 | propertycache = util.propertycache |
|
17 | 18 | |
@@ -25,8 +26,12 b' class changectx(object):' | |||
|
25 | 26 | self._repo = repo |
|
26 | 27 | |
|
27 | 28 | if isinstance(changeid, int): |
|
29 | try: | |
|
30 | self._node = repo.changelog.node(changeid) | |
|
31 | except IndexError: | |
|
32 | raise error.RepoLookupError( | |
|
33 | _("unknown revision '%s'") % changeid) | |
|
28 | 34 | self._rev = changeid |
|
29 | self._node = repo.changelog.node(changeid) | |
|
30 | 35 | return |
|
31 | 36 | if isinstance(changeid, long): |
|
32 | 37 | changeid = str(changeid) |
@@ -62,7 +67,7 b' class changectx(object):' | |||
|
62 | 67 | self._rev = r |
|
63 | 68 | self._node = repo.changelog.node(r) |
|
64 | 69 | return |
|
65 | except (ValueError, OverflowError): | |
|
70 | except (ValueError, OverflowError, IndexError): | |
|
66 | 71 | pass |
|
67 | 72 | |
|
68 | 73 | if len(changeid) == 40: |
@@ -95,7 +100,10 b' class changectx(object):' | |||
|
95 | 100 | |
|
96 | 101 | # lookup failed |
|
97 | 102 | # check if it might have come from damaged dirstate |
|
98 | if changeid in repo.dirstate.parents(): | |
|
103 | # | |
|
104 | # XXX we could avoid the unfiltered if we had a recognizable exception | |
|
105 | # for filtered changeset access | |
|
106 | if changeid in repo.unfiltered().dirstate.parents(): | |
|
99 | 107 | raise error.Abort(_("working directory has unknown parent '%s'!") |
|
100 | 108 | % short(changeid)) |
|
101 | 109 | try: |
@@ -204,7 +212,7 b' class changectx(object):' | |||
|
204 | 212 | def mutable(self): |
|
205 | 213 | return self.phase() > phases.public |
|
206 | 214 | def hidden(self): |
|
207 | return self._rev in self._repo.hiddenrevs | |
|
215 | return self._rev in repoview.filterrevs(self._repo, 'visible') | |
|
208 | 216 | |
|
209 | 217 | def parents(self): |
|
210 | 218 | """return contexts for each parent changeset""" |
@@ -250,6 +258,34 b' class changectx(object):' | |||
|
250 | 258 | """ |
|
251 | 259 | return self.rev() in obsmod.getrevs(self._repo, 'bumped') |
|
252 | 260 | |
|
261 | def divergent(self): | |
|
262 | """Is a successor of a changeset with multiple possible successors sets | 
|
263 | ||
|
264 | Only non-public and non-obsolete changesets may be divergent. | |
|
265 | """ | |
|
266 | return self.rev() in obsmod.getrevs(self._repo, 'divergent') | |
|
267 | ||
|
268 | def troubled(self): | |
|
269 | """True if the changeset is either unstable, bumped or divergent""" | |
|
270 | return self.unstable() or self.bumped() or self.divergent() | |
|
271 | ||
|
272 | def troubles(self): | |
|
273 | """return the list of troubles affecting this changeset. | 
|
274 | ||
|
275 | Troubles are returned as strings. Possible values are: | 
|
276 | - unstable, | |
|
277 | - bumped, | |
|
278 | - divergent. | |
|
279 | """ | |
|
280 | troubles = [] | |
|
281 | if self.unstable(): | |
|
282 | troubles.append('unstable') | |
|
283 | if self.bumped(): | |
|
284 | troubles.append('bumped') | |
|
285 | if self.divergent(): | |
|
286 | troubles.append('divergent') | |
|
287 | return troubles | |
|
288 | ||
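
troubled() and troubles() are pure compositions of the three predicates above. A hedged sketch of how a caller might consume them (FakeCtx is a stand-in for a real changectx):

    class FakeCtx(object):
        def unstable(self):
            return True
        def bumped(self):
            return False
        def divergent(self):
            return True
        def troubled(self):
            return self.unstable() or self.bumped() or self.divergent()
        def troubles(self):
            return [name for name, hit in [('unstable', self.unstable()),
                                           ('bumped', self.bumped()),
                                           ('divergent', self.divergent())] if hit]

    ctx = FakeCtx()
    print(ctx.troubled())  # True
    print(ctx.troubles())  # ['unstable', 'divergent']
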
|
253 | 289 | def _fileinfo(self, path): |
|
254 | 290 | if '_manifest' in self.__dict__: |
|
255 | 291 | try: |
@@ -352,6 +388,9 b' class changectx(object):' | |||
|
352 | 388 | def dirs(self): |
|
353 | 389 | return self._dirs |
|
354 | 390 | |
|
391 | def dirty(self): | |
|
392 | return False | |
|
393 | ||
|
355 | 394 | class filectx(object): |
|
356 | 395 | """A filecontext object makes access to data related to a particular |
|
357 | 396 | filerevision convenient.""" |
@@ -380,7 +419,26 b' class filectx(object):' | |||
|
380 | 419 | |
|
381 | 420 | @propertycache |
|
382 | 421 | def _changectx(self): |
|
383 | return changectx(self._repo, self._changeid) | |
|
422 | try: | |
|
423 | return changectx(self._repo, self._changeid) | |
|
424 | except error.RepoLookupError: | |
|
425 | # Linkrev may point to any revision in the repository. When the | |
|
426 | # repository is filtered this may lead to `filectx` trying to build | |
|
427 | # `changectx` for a filtered revision. In such a case we fall back to | 
|
428 | # creating a `changectx` on the unfiltered version of the repository. | 
|
429 | # This fallback should not be an issue because `changectx` objects from | 
|
430 | # `filectx` are not used in complex operations that care about | 
|
431 | # filtering. | 
|
432 | # | 
|
433 | # This fallback is a cheap and dirty fix that prevents several | 
|
434 | # crashes. It does not ensure correct behavior, but the behavior | 
|
435 | # was not correct before filtering either, and "incorrect behavior" | 
|
436 | # is seen as better than "crash". | 
|
437 | # | 
|
438 | # Linkrevs have several serious troubles with filtering that are | 
|
439 | # complicated to solve. Proper handling of the issue here should be | 
|
440 | # considered when solutions to the linkrev issue are on the table. | 
|
441 | return changectx(self._repo.unfiltered(), self._changeid) | |
|
384 | 442 | |
|
385 | 443 | @propertycache |
|
386 | 444 | def _filelog(self): |
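
The comment block describes a generic idiom: try the lookup against the filtered view, and retry against the unfiltered repository when it fails. A self-contained model of that fallback (stub classes; only unfiltered() mirrors the real API):

    class FilteredLookupError(Exception):
        pass

    class RepoView(object):
        def __init__(self, visible, allrevs):
            self._visible, self._all = visible, allrevs
        def __getitem__(self, rev):
            if rev not in self._visible:
                raise FilteredLookupError(rev)
            return self._all[rev]
        def unfiltered(self):
            return RepoView(set(self._all), self._all)

    def lookup(repo, rev):
        try:
            return repo[rev]
        except FilteredLookupError:
            return repo.unfiltered()[rev]   # the cheap-and-dirty fallback

    repo = RepoView({0}, {0: 'node0', 1: 'hidden-node1'})
    print(lookup(repo, 1))  # 'hidden-node1'
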
@@ -977,13 +1035,13 b' class workingctx(changectx):' | |||
|
977 | 1035 | return self._parents[0].ancestor(c2) # punt on two parents for now |
|
978 | 1036 | |
|
979 | 1037 | def walk(self, match): |
|
980 | return sorted(self._repo.dirstate.walk(match, self.substate, | |
|
1038 | return sorted(self._repo.dirstate.walk(match, sorted(self.substate), | |
|
981 | 1039 | True, False)) |
|
982 | 1040 | |
|
983 | 1041 | def dirty(self, missing=False, merge=True, branch=True): |
|
984 | 1042 | "check whether a working directory is modified" |
|
985 | 1043 | # check subrepos first |
|
986 | for s in self.substate: | |
|
1044 | for s in sorted(self.substate): | |
|
987 | 1045 | if self.sub(s).dirty(): |
|
988 | 1046 | return True |
|
989 | 1047 | # check current working dir |
@@ -145,12 +145,16 b' def _forwardcopies(a, b):' | |||
|
145 | 145 | |
|
146 | 146 | return cm |
|
147 | 147 | |
|
148 | def _backwardcopies(a, b): | |
|
149 | # because the forward mapping is 1:n, we can lose renames here | |
|
150 | # in particular, we find renames better than copies | |
|
148 | def _backwardrenames(a, b): | |
|
149 | # Even though we're not taking copies into account, 1:n rename situations | |
|
150 | # can still exist (e.g. hg cp a b; hg mv a c). In those cases we | |
|
151 | # arbitrarily pick one of the renames. | |
|
151 | 152 | f = _forwardcopies(b, a) |
|
152 | 153 | r = {} |
|
153 | for k, v in f.iteritems(): | |
|
154 | for k, v in sorted(f.iteritems()): | |
|
155 | # remove copies | |
|
156 | if v in a: | |
|
157 | continue | |
|
154 | 158 | r[v] = k |
|
155 | 159 | return r |
|
156 | 160 | |
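
The comment's `hg cp a b; hg mv a c` case is easiest to see with concrete data; a standalone model of the inversion performed above:

    # `forward` maps new name -> old name, as _forwardcopies would return it.
    forward = {'b': 'a', 'c': 'a'}   # a was copied to b and moved to c
    present = set()                  # 'a' no longer exists in the target
    renames = {}
    for new, old in sorted(forward.items()):
        if old in present:
            continue                 # old name still present: a copy, not a rename
        renames[old] = new           # later keys overwrite: arbitrary but stable
    print(renames)                   # {'a': 'c'}
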
@@ -162,19 +166,25 b' def pathcopies(x, y):' | |||
|
162 | 166 | if a == x: |
|
163 | 167 | return _forwardcopies(x, y) |
|
164 | 168 | if a == y: |
|
165 | return _backwardcopies(x, y) | |
|
166 | return _chain(x, y, _backwardcopies(x, a), _forwardcopies(a, y)) | |
|
169 | return _backwardrenames(x, y) | |
|
170 | return _chain(x, y, _backwardrenames(x, a), _forwardcopies(a, y)) | |
|
167 | 171 | |
|
168 | 172 | def mergecopies(repo, c1, c2, ca): |
|
169 | 173 | """ |
|
170 | 174 | Find moves and copies between context c1 and c2 that are relevant |
|
171 | 175 | for merging. |
|
172 | 176 | |
|
173 | Returns three dicts: "copy", "diverge" and "renamedelete". | |
|
177 | Returns four dicts: "copy", "movewithdir", "diverge", and | |
|
178 | "renamedelete". | |
|
174 | 179 | |
|
175 | 180 | "copy" is a mapping from destination name -> source name, |
|
176 | 181 | where source is in c1 and destination is in c2 or vice-versa. |
|
177 | 182 | |
|
183 | "movewithdir" is a mapping from source name -> destination name, | |
|
184 | where the file at source present in one context but not the other | |
|
185 | needs to be moved to destination by the merge process, because the | |
|
186 | other context moved the directory it is in. | |
|
187 | ||
|
178 | 188 | "diverge" is a mapping of source name -> list of destination names |
|
179 | 189 | for divergent renames. |
|
180 | 190 | |
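
For readers of the docstring above, plausible shapes of the four dictionaries (the file names are invented for illustration):

    copy = {'new.c': 'old.c'}                    # destination -> source, to merge
    movewithdir = {'d1/f.c': 'd2/f.c'}           # source -> destination, moved with its directory
    diverge = {'orig.c': ['left.c', 'right.c']}  # one source renamed two ways
    renamedelete = {'gone.c': ['kept.c']}        # renamed on one side, deleted on the other
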
@@ -183,16 +193,16 b' def mergecopies(repo, c1, c2, ca):' | |||
|
183 | 193 | """ |
|
184 | 194 | # avoid silly behavior for update from empty dir |
|
185 | 195 | if not c1 or not c2 or c1 == c2: |
|
186 | return {}, {}, {} | |
|
196 | return {}, {}, {}, {} | |
|
187 | 197 | |
|
188 | 198 | # avoid silly behavior for parent -> working dir |
|
189 | 199 | if c2.node() is None and c1.node() == repo.dirstate.p1(): |
|
190 | return repo.dirstate.copies(), {}, {} | |
|
200 | return repo.dirstate.copies(), {}, {}, {} | |
|
191 | 201 | |
|
192 | 202 | limit = _findlimit(repo, c1.rev(), c2.rev()) |
|
193 | 203 | if limit is None: |
|
194 | 204 | # no common ancestor, no copies |
|
195 | return {}, {}, {} | |
|
205 | return {}, {}, {}, {} | |
|
196 | 206 | m1 = c1.manifest() |
|
197 | 207 | m2 = c2.manifest() |
|
198 | 208 | ma = ca.manifest() |
@@ -206,6 +216,7 b' def mergecopies(repo, c1, c2, ca):' | |||
|
206 | 216 | |
|
207 | 217 | ctx = util.lrucachefunc(makectx) |
|
208 | 218 | copy = {} |
|
219 | movewithdir = {} | |
|
209 | 220 | fullcopy = {} |
|
210 | 221 | diverge = {} |
|
211 | 222 | |
@@ -303,7 +314,7 b' def mergecopies(repo, c1, c2, ca):' | |||
|
303 | 314 | if fullcopy: |
|
304 | 315 | repo.ui.debug(" all copies found (* = to merge, ! = divergent, " |
|
305 | 316 | "% = renamed and deleted):\n") |
|
306 | for f in fullcopy: | |
|
317 | for f in sorted(fullcopy): | |
|
307 | 318 | note = "" |
|
308 | 319 | if f in copy: |
|
309 | 320 | note += "*" |
@@ -311,11 +322,12 b' def mergecopies(repo, c1, c2, ca):' | |||
|
311 | 322 | note += "!" |
|
312 | 323 | if f in renamedelete2: |
|
313 | 324 | note += "%" |
|
314 | repo.ui.debug(" %s -> %s %s\n" % (fullcopy[f], f, note)) | |
|
325 | repo.ui.debug(" src: '%s' -> dst: '%s' %s\n" % (fullcopy[f], f, | |
|
326 | note)) | |
|
315 | 327 | del diverge2 |
|
316 | 328 | |
|
317 | 329 | if not fullcopy: |
|
318 | return copy, diverge, renamedelete | |
|
330 | return copy, movewithdir, diverge, renamedelete | |
|
319 | 331 | |
|
320 | 332 | repo.ui.debug(" checking for directory renames\n") |
|
321 | 333 | |
@@ -352,10 +364,11 b' def mergecopies(repo, c1, c2, ca):' | |||
|
352 | 364 | del d1, d2, invalid |
|
353 | 365 | |
|
354 | 366 | if not dirmove: |
|
355 | return copy, diverge, renamedelete | |
|
367 | return copy, movewithdir, diverge, renamedelete | |
|
356 | 368 | |
|
357 | 369 | for d in dirmove: |
|
358 | repo.ui.debug(" dir %s -> %s\n" % (d, dirmove[d])) | |
|
370 | repo.ui.debug(" discovered dir src: '%s' -> dst: '%s'\n" % | |
|
371 | (d, dirmove[d])) | |
|
359 | 372 | |
|
360 | 373 | # check unaccounted nonoverlapping files against directory moves |
|
361 | 374 | for f in u1 + u2: |
@@ -365,8 +378,9 b' def mergecopies(repo, c1, c2, ca):' | |||
|
365 | 378 | # new file added in a directory that was moved, move it |
|
366 | 379 | df = dirmove[d] + f[len(d):] |
|
367 | 380 | if df not in copy: |
|
368 | copy[f] = df | |
|
369 | repo.ui.debug(" file %s -> %s\n" % (f, copy[f])) | |
|
381 | movewithdir[f] = df | |
|
382 | repo.ui.debug((" pending file src: '%s' -> " | |
|
383 | "dst: '%s'\n") % (f, df)) | |
|
370 | 384 | break |
|
371 | 385 | |
|
372 | return copy, diverge, renamedelete | |
|
386 | return copy, movewithdir, diverge, renamedelete |
@@ -265,6 +265,12 b' class dirstate(object):' | |||
|
265 | 265 | try: |
|
266 | 266 | f.write(self._branch + '\n') |
|
267 | 267 | f.close() |
|
268 | ||
|
269 | # make sure filecache has the correct stat info for _branch after | |
|
270 | # replacing the underlying file | |
|
271 | ce = self._filecache['_branch'] | |
|
272 | if ce: | |
|
273 | ce.refresh() | |
|
268 | 274 | except: # re-raises |
|
269 | 275 | f.discard() |
|
270 | 276 | raise |
@@ -607,7 +613,7 b' class dirstate(object):' | |||
|
607 | 613 | normalize = self._normalize |
|
608 | 614 | skipstep3 = False |
|
609 | 615 | else: |
|
610 | normalize = lambda x, y, z: x | |
|
616 | normalize = None | |
|
611 | 617 | |
|
612 | 618 | files = sorted(match.files()) |
|
613 | 619 | subrepos.sort() |
@@ -628,7 +634,10 b' class dirstate(object):' | |||
|
628 | 634 | |
|
629 | 635 | # step 1: find all explicit files |
|
630 | 636 | for ff in files: |
|
631 | nf = normalize(normpath(ff), False, True) | |
|
637 | if normalize: | |
|
638 | nf = normalize(normpath(ff), False, True) | |
|
639 | else: | |
|
640 | nf = normpath(ff) | |
|
632 | 641 | if nf in results: |
|
633 | 642 | continue |
|
634 | 643 | |
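
Setting normalize to None instead of an identity lambda turns the per-file call into a cheap branch on case-sensitive filesystems. The shape of that fast path, sketched standalone (fold() is an invented stand-in for dirstate._normalize):

    from os.path import normpath

    def fold(path):
        return path.lower()       # stand-in for real case folding

    for casesensitive in (True, False):
        normalize = None if casesensitive else fold
        for ff in ('Foo//Bar', './Baz'):
            nf = normalize(normpath(ff)) if normalize else normpath(ff)
            print(nf)
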
@@ -678,7 +687,10 b' class dirstate(object):' | |||
|
678 | 687 | continue |
|
679 | 688 | raise |
|
680 | 689 | for f, kind, st in entries: |
|
681 | nf = normalize(nd and (nd + "/" + f) or f, True, True) | |
|
690 | if normalize: | |
|
691 | nf = normalize(nd and (nd + "/" + f) or f, True, True) | |
|
692 | else: | |
|
693 | nf = nd and (nd + "/" + f) or f | |
|
682 | 694 | if nf not in results: |
|
683 | 695 | if kind == dirkind: |
|
684 | 696 | if not ignore(nf): |
@@ -698,11 +710,9 b' class dirstate(object):' | |||
|
698 | 710 | # step 3: report unseen items in the dmap hash |
|
699 | 711 | if not skipstep3 and not exact: |
|
700 | 712 | visit = sorted([f for f in dmap if f not in results and matchfn(f)]) |
|
701 | for nf, st in zip(visit, util.statfiles([join(i) for i in visit])): | |
|
702 | if (not st is None and | |
|
703 | getkind(st.st_mode) not in (regkind, lnkkind)): | |
|
704 | st = None | |
|
705 | results[nf] = st | |
|
713 | nf = iter(visit).next | |
|
714 | for st in util.statfiles([join(i) for i in visit]): | |
|
715 | results[nf()] = st | |
|
706 | 716 | for s in subrepos: |
|
707 | 717 | del results[s] |
|
708 | 718 | del results['.hg'] |
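
The rewritten step 3 pairs each stat result with its file name through a bound iterator instead of materializing zip(visit, ...), and drops the per-file kind check. The pairing trick in isolation (Python 2 spelling, matching the codebase; use next(it) on Python 3):

    visit = ['a', 'b', 'c']          # sorted file names
    stats = [1, None, 3]             # stand-ins for util.statfiles() results
    results = {}
    nf = iter(visit).next            # bound method: one name per stat
    for st in stats:
        results[nf()] = st
    print(results)                   # {'a': 1, 'b': None, 'c': 3}
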
@@ -748,13 +758,19 b' class dirstate(object):' | |||
|
748 | 758 | radd = removed.append |
|
749 | 759 | dadd = deleted.append |
|
750 | 760 | cadd = clean.append |
|
761 | mexact = match.exact | |
|
762 | dirignore = self._dirignore | |
|
763 | checkexec = self._checkexec | |
|
764 | checklink = self._checklink | |
|
765 | copymap = self._copymap | |
|
766 | lastnormaltime = self._lastnormaltime | |
|
751 | 767 | |
|
752 | 768 | lnkkind = stat.S_IFLNK |
|
753 | 769 | |
|
754 | 770 | for fn, st in self.walk(match, subrepos, listunknown, |
|
755 | 771 | listignored).iteritems(): |
|
756 | 772 | if fn not in dmap: |
|
757 |
if (listignored or m |
|
|
773 | if (listignored or mexact(fn)) and dirignore(fn): | |
|
758 | 774 | if listignored: |
|
759 | 775 | iadd(fn) |
|
760 | 776 | elif listunknown: |
@@ -773,15 +789,15 b' class dirstate(object):' | |||
|
773 | 789 | mtime = int(st.st_mtime) |
|
774 | 790 | if (size >= 0 and |
|
775 | 791 | ((size != st.st_size and size != st.st_size & _rangemask) |
|
776 | or ((mode ^ st.st_mode) & 0100 and self._checkexec)) | |
|
777 | and (mode & lnkkind != lnkkind or self._checklink) | |
|
792 | or ((mode ^ st.st_mode) & 0100 and checkexec)) | |
|
793 | and (mode & lnkkind != lnkkind or checklink) | |
|
778 | 794 | or size == -2 # other parent |
|
779 | or fn in self._copymap): | |
|
795 | or fn in copymap): | |
|
780 | 796 | madd(fn) |
|
781 | 797 | elif ((time != mtime and time != mtime & _rangemask) |
|
782 | and (mode & lnkkind != lnkkind or self._checklink)): | |
|
798 | and (mode & lnkkind != lnkkind or checklink)): | |
|
783 | 799 | ladd(fn) |
|
784 | elif mtime == self._lastnormaltime: | |
|
800 | elif mtime == lastnormaltime: | |
|
785 | 801 | # fn may have been changed in the same timeslot without |
|
786 | 802 | # changing its size. This can happen if we quickly do |
|
787 | 803 | # multiple commits in a single transaction. |
@@ -8,6 +8,7 b'' | |||
|
8 | 8 | from node import nullid, short |
|
9 | 9 | from i18n import _ |
|
10 | 10 | import util, setdiscovery, treediscovery, phases, obsolete, bookmarks |
|
11 | import branchmap | |
|
11 | 12 | |
|
12 | 13 | def findcommonincoming(repo, remote, heads=None, force=False): |
|
13 | 14 | """Return a tuple (common, anyincoming, heads) used to identify the common |
@@ -114,7 +115,7 b' def findcommonoutgoing(repo, other, only' | |||
|
114 | 115 | og.missingheads = onlyheads or repo.heads() |
|
115 | 116 | elif onlyheads is None: |
|
116 | 117 | # use visible heads as it should be cached |
|
117 | og.missingheads = visibleheads(repo) | |
|
118 | og.missingheads = repo.filtered("served").heads() | |
|
118 | 119 | og.excluded = [ctx.node() for ctx in repo.set('secret() or extinct()')] |
|
119 | 120 | else: |
|
120 | 121 | # compute common, missing and exclude secret stuff |
@@ -192,9 +193,10 b' def _headssummary(repo, remote, outgoing' | |||
|
192 | 193 | |
|
193 | 194 | # D. Update newmap with outgoing changes. |
|
194 | 195 | # This will possibly add new heads and remove existing ones. |
|
195 | newmap = dict((branch, heads[1]) for branch, heads in headssum.iteritems() | |
|
196 | if heads[0] is not None) | |
|
197 | repo._updatebranchcache(newmap, missingctx) | |
|
196 | newmap = branchmap.branchcache((branch, heads[1]) | |
|
197 | for branch, heads in headssum.iteritems() | |
|
198 | if heads[0] is not None) | |
|
199 | newmap.update(repo, (ctx.rev() for ctx in missingctx)) | |
|
198 | 200 | for branch, newheads in newmap.iteritems(): |
|
199 | 201 | headssum[branch][1][:] = newheads |
|
200 | 202 | return headssum |
@@ -205,7 +207,7 b' def _oldheadssummary(repo, remoteheads, ' | |||
|
205 | 207 | cl = repo.changelog |
|
206 | 208 | # 1-4b. old servers: Check for new topological heads. |
|
207 | 209 | # Construct {old,new}map with branch = None (topological branch). |
|
208 | # (code based on _updatebranchcache) | |
|
210 | # (code based on update) | |
|
209 | 211 | oldheads = set(h for h in remoteheads if h in cl.nodemap) |
|
210 | 212 | # all nodes in outgoing.missing are children of either: |
|
211 | 213 | # - an element of oldheads |
@@ -266,7 +268,7 b' def checkheads(repo, remote, outgoing, r' | |||
|
266 | 268 | allmissing = set(outgoing.missing) |
|
267 | 269 | allfuturecommon = set(c.node() for c in repo.set('%ld', outgoing.common)) |
|
268 | 270 | allfuturecommon.update(allmissing) |
|
269 | for branch, heads in headssum.iteritems(): | |
|
271 | for branch, heads in sorted(headssum.iteritems()): | |
|
270 | 272 | if heads[0] is None: |
|
271 | 273 | # Maybe we should abort if we push more than one head
|
272 | 274 | # for new branches ? |
@@ -310,7 +312,7 b' def checkheads(repo, remote, outgoing, r' | |||
|
310 | 312 | unsynced = True |
|
311 | 313 | if len(newhs) > len(oldhs): |
|
312 | 314 | # strip updates to existing remote heads from the new heads list |
|
313 | dhs = list(newhs - bookmarkedheads - oldhs) | |
|
315 | dhs = sorted(newhs - bookmarkedheads - oldhs) | |
|
314 | 316 | if dhs: |
|
315 | 317 | if error is None: |
|
316 | 318 | if branch not in ('default', None): |
@@ -335,43 +337,3 b' def checkheads(repo, remote, outgoing, r' | |||
|
335 | 337 | # 6. Check for unsynced changes on involved branches. |
|
336 | 338 | if unsynced: |
|
337 | 339 | repo.ui.warn(_("note: unsynced remote changes!\n")) |
|
338 | ||
|
339 | def visibleheads(repo): | |
|
340 | """return the set of visible head of this repo""" | |
|
341 | # XXX we want a cache on this | |
|
342 | sroots = repo._phasecache.phaseroots[phases.secret] | |
|
343 | if sroots or repo.obsstore: | |
|
344 | # XXX very slow revset. storing heads or secret "boundary" | |
|
345 | # would help. | |
|
346 | revset = repo.set('heads(not (%ln:: + extinct()))', sroots) | |
|
347 | ||
|
348 | vheads = [ctx.node() for ctx in revset] | |
|
349 | if not vheads: | |
|
350 | vheads.append(nullid) | |
|
351 | else: | |
|
352 | vheads = repo.heads() | |
|
353 | return vheads | |
|
354 | ||
|
355 | ||
|
356 | def visiblebranchmap(repo): | |
|
357 | """return a branchmap for the visible set""" | |
|
358 | # XXX Recomputing this data on the fly is very slow. We should build a | |
|
359 | # XXX cached version while computing the standard branchmap version. | |
|
360 | sroots = repo._phasecache.phaseroots[phases.secret] | |
|
361 | if sroots or repo.obsstore: | |
|
362 | vbranchmap = {} | |
|
363 | for branch, nodes in repo.branchmap().iteritems(): | |
|
364 | # search for secret heads. | |
|
365 | for n in nodes: | |
|
366 | if repo[n].phase() >= phases.secret: | |
|
367 | nodes = None | |
|
368 | break | |
|
369 | # if secret heads were found we must compute them again | |
|
370 | if nodes is None: | |
|
371 | s = repo.set('heads(branch(%s) - secret() - extinct())', | |
|
372 | branch) | |
|
373 | nodes = [c.node() for c in s] | |
|
374 | vbranchmap[branch] = nodes | |
|
375 | else: | |
|
376 | vbranchmap = repo.branchmap() | |
|
377 | return vbranchmap |
@@ -183,8 +183,8 b' def _runcatch(req):' | |||
|
183 | 183 | else: |
|
184 | 184 | raise |
|
185 | 185 | except OSError, inst: |
|
186 | if getattr(inst, "filename", None): | |
|
187 | ui.warn(_("abort: %s: %s\n") % (inst.strerror, inst.filename)) | |
|
186 | if getattr(inst, "filename", None) is not None: | |
|
187 | ui.warn(_("abort: %s: '%s'\n") % (inst.strerror, inst.filename)) | |
|
188 | 188 | else: |
|
189 | 189 | ui.warn(_("abort: %s\n") % inst.strerror) |
|
190 | 190 | except KeyboardInterrupt: |
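
The switch to `is not None` presumably matters because an exception's filename can be an empty string: falsy, yet still worth printing in quotes, while only None means there is no filename at all. A sketch of the distinction:

    import errno

    inst = OSError(errno.ENOENT, 'No such file or directory')
    inst.filename = ''                 # falsy but present
    if getattr(inst, 'filename', None) is not None:
        print("abort: %s: '%s'" % (inst.strerror, inst.filename))
    else:
        print("abort: %s" % inst.strerror)
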
@@ -710,6 +710,8 b' def _dispatch(req):' | |||
|
710 | 710 | repo = hg.repository(ui, path=path) |
|
711 | 711 | if not repo.local(): |
|
712 | 712 | raise util.Abort(_("repository '%s' is not local") % path) |
|
713 | if options['hidden']: | |
|
714 | repo = repo.unfiltered() | |
|
713 | 715 | repo.ui.setconfig("bundle", "mainreporoot", repo.root) |
|
714 | 716 | except error.RequirementError: |
|
715 | 717 | raise |
@@ -80,8 +80,8 b' def tolocal(s):' | |||
|
80 | 80 | 'foo: \\xc3\\xa4' |
|
81 | 81 | >>> u2 = 'foo: \\xc3\\xa1' |
|
82 | 82 | >>> d = { l: 1, tolocal(u2): 2 } |
|
83 | >>> d # no collision | |
|
84 | {'foo: ?': 1, 'foo: ?': 2} | |
|
83 | >>> len(d) # no collision | |
|
84 | 2 | |
|
85 | 85 | >>> 'foo: ?' in d |
|
86 | 86 | False |
|
87 | 87 | >>> l1 = 'foo: \\xe4' # historical latin1 fallback |
@@ -171,13 +171,15 b' def _ifail(repo, mynode, orig, fcd, fco,' | |||
|
171 | 171 | |
|
172 | 172 | def _premerge(repo, toolconf, files): |
|
173 | 173 | tool, toolpath, binary, symlink = toolconf |
|
174 | if symlink: | |
|
175 | return 1 | |
|
174 | 176 | a, b, c, back = files |
|
175 | 177 | |
|
176 | 178 | ui = repo.ui |
|
177 | 179 | |
|
178 | 180 | # do we attempt to simplemerge first? |
|
179 | 181 | try: |
|
180 |
premerge = _toolbool(ui, tool, "premerge", not |
|
|
182 | premerge = _toolbool(ui, tool, "premerge", not binary) | |
|
181 | 183 | except error.ConfigError: |
|
182 | 184 | premerge = _toolstr(ui, tool, "premerge").lower() |
|
183 | 185 | valid = 'keep'.split() |
@@ -204,6 +206,12 b' def _imerge(repo, mynode, orig, fcd, fco' | |||
|
204 | 206 | Uses the internal non-interactive simple merge algorithm for merging |
|
205 | 207 | files. It will fail if there are any conflicts and leave markers in |
|
206 | 208 | the partially merged file.""" |
|
209 | tool, toolpath, binary, symlink = toolconf | |
|
210 | if symlink: | |
|
211 | repo.ui.warn(_('warning: internal:merge cannot merge symlinks ' | |
|
212 | 'for %s\n') % fcd.path()) | |
|
213 | return False, 1 | |
|
214 | ||
|
207 | 215 | r = _premerge(repo, toolconf, files) |
|
208 | 216 | if r: |
|
209 | 217 | a, b, c, back = files |
@@ -373,7 +373,7 b' def subrepo(mctx, x):' | |||
|
373 | 373 | # i18n: "subrepo" is a keyword |
|
374 | 374 | getargs(x, 0, 1, _("subrepo takes at most one argument")) |
|
375 | 375 | ctx = mctx.ctx |
|
376 | sstate = ctx.substate | |
|
376 | sstate = sorted(ctx.substate) | |
|
377 | 377 | if x: |
|
378 | 378 | pat = getstring(x, _("subrepo requires a pattern or no arguments")) |
|
379 | 379 |
@@ -31,6 +31,10 b' class baseformatter(object):' | |||
|
31 | 31 | '''do default text output while assigning data to item''' |
|
32 | 32 | for k, v in zip(fields.split(), fielddata): |
|
33 | 33 | self._item[k] = v |
|
34 | def condwrite(self, cond, fields, deftext, *fielddata, **opts): | |
|
35 | '''do conditional write (primarily for plain formatter)''' | |
|
36 | for k, v in zip(fields.split(), fielddata): | |
|
37 | self._item[k] = v | |
|
34 | 38 | def plain(self, text, **opts): |
|
35 | 39 | '''show raw text for non-templated mode''' |
|
36 | 40 | pass |
@@ -51,6 +55,10 b' class plainformatter(baseformatter):' | |||
|
51 | 55 | pass |
|
52 | 56 | def write(self, fields, deftext, *fielddata, **opts): |
|
53 | 57 | self._ui.write(deftext % fielddata, **opts) |
|
58 | def condwrite(self, cond, fields, deftext, *fielddata, **opts): | |
|
59 | '''do conditional write''' | |
|
60 | if cond: | |
|
61 | self._ui.write(deftext % fielddata, **opts) | |
|
54 | 62 | def plain(self, text, **opts): |
|
55 | 63 | self._ui.write(text, **opts) |
|
56 | 64 | def end(self): |
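
condwrite splits by formatter: the plain formatter prints only when the condition holds, while the structured base formatter records the fields unconditionally so non-plain output stays complete. A minimal model of the two behaviors:

    class BaseFormatter(object):
        def __init__(self):
            self.item = {}
        def condwrite(self, cond, fields, deftext, *fielddata):
            for k, v in zip(fields.split(), fielddata):
                self.item[k] = v              # always keep the data

    class PlainFormatter(BaseFormatter):
        def condwrite(self, cond, fields, deftext, *fielddata):
            if cond:
                print(deftext % fielddata)    # print only on demand

    fm = PlainFormatter()
    fm.condwrite(True, 'rev id', '%5d:%s', 42, 'abc123')   # prints "   42:abc123"
    fm.condwrite(False, 'rev id', '%5d:%s', 42, 'abc123')  # silent
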
@@ -147,7 +147,7 b' def save_state(repo, state):' | |||
|
147 | 147 | f = repo.opener("bisect.state", "w", atomictemp=True) |
|
148 | 148 | wlock = repo.wlock() |
|
149 | 149 | try: |
|
150 | for kind in state: | |
|
150 | for kind in sorted(state): | |
|
151 | 151 | for node in state[kind]: |
|
152 | 152 | f.write("%s %s\n" % (kind, hex(node))) |
|
153 | 153 | f.close() |
@@ -850,14 +850,6 b' Supported arguments:' | |||
|
850 | 850 | ``prompt`` |
|
851 | 851 | Always prompt for merge success, regardless of success reported by tool. |
|
852 | 852 | |
|
853 | ``checkchanged`` | |
|
854 | True is equivalent to ``check = changed``. | |
|
855 | Default: False | |
|
856 | ||
|
857 | ``checkconflicts`` | |
|
858 | True is equivalent to ``check = conflicts``. | |
|
859 | Default: False | |
|
860 | ||
|
861 | 853 | ``fixeol`` |
|
862 | 854 | Attempt to fix up EOL changes caused by the merge tool. |
|
863 | 855 | Default: False |
@@ -1295,6 +1287,10 b' The full set of options is:' | |||
|
1295 | 1287 | (DEPRECATED) Whether to allow .zip downloading of repository |
|
1296 | 1288 | revisions. Default is False. This feature creates temporary files. |
|
1297 | 1289 | |
|
1290 | ``archivesubrepos`` | |
|
1291 | Whether to recurse into subrepositories when archiving. Default is | |
|
1292 | False. | |
|
1293 | ||
|
1298 | 1294 | ``baseurl`` |
|
1299 | 1295 | Base URL to use when publishing URLs in other locations, so |
|
1300 | 1296 | third-party tools like email notification hooks can construct |
@@ -113,7 +113,7 b" def repository(ui, path='', create=False" | |||
|
113 | 113 | if not repo: |
|
114 | 114 | raise util.Abort(_("repository '%s' is not local") % |
|
115 | 115 | (path or peer.url())) |
|
116 | return repo | |
|
116 | return repo.filtered('visible') | |
|
117 | 117 | |
|
118 | 118 | def peer(uiorrepo, opts, path, create=False): |
|
119 | 119 | '''return a repository peer for the specified path''' |
@@ -171,11 +171,14 b' def share(ui, source, dest=None, update=' | |||
|
171 | 171 | r = repository(ui, root) |
|
172 | 172 | |
|
173 | 173 | default = srcrepo.ui.config('paths', 'default') |
|
174 | if default: | |
|
175 | fp = r.opener("hgrc", "w", text=True) | |
|
176 | fp.write("[paths]\n") | |
|
177 | fp.write("default = %s\n" % default) | |
|
178 | fp.close() | |
|
174 | if not default: | |
|
175 | # set default to source for being able to clone subrepos | |
|
176 | default = os.path.abspath(util.urllocalpath(origsource)) | |
|
177 | fp = r.opener("hgrc", "w", text=True) | |
|
178 | fp.write("[paths]\n") | |
|
179 | fp.write("default = %s\n" % default) | |
|
180 | fp.close() | |
|
181 | r.ui.setconfig('paths', 'default', default) | |
|
179 | 182 | |
|
180 | 183 | if update: |
|
181 | 184 | r.ui.status(_("updating working directory\n")) |
@@ -288,17 +291,7 b' def clone(ui, peeropts, source, dest=Non' | |||
|
288 | 291 | elif os.listdir(dest): |
|
289 | 292 | raise util.Abort(_("destination '%s' is not empty") % dest) |
|
290 | 293 | |
|
291 | class DirCleanup(object): | |
|
292 | def __init__(self, dir_): | |
|
293 | self.rmtree = shutil.rmtree | |
|
294 | self.dir_ = dir_ | |
|
295 | def close(self): | |
|
296 | self.dir_ = None | |
|
297 | def cleanup(self): | |
|
298 | if self.dir_: | |
|
299 | self.rmtree(self.dir_, True) | |
|
300 | ||
|
301 | srclock = destlock = dircleanup = None | |
|
294 | srclock = destlock = cleandir = None | |
|
302 | 295 | srcrepo = srcpeer.local() |
|
303 | 296 | try: |
|
304 | 297 | abspath = origsource |
@@ -306,7 +299,7 b' def clone(ui, peeropts, source, dest=Non' | |||
|
306 | 299 | abspath = os.path.abspath(util.urllocalpath(origsource)) |
|
307 | 300 | |
|
308 | 301 | if islocal(dest): |
|
309 | dircleanup = DirCleanup(dest) | |
|
302 | cleandir = dest | |
|
310 | 303 | |
|
311 | 304 | copy = False |
|
312 | 305 | if (srcrepo and srcrepo.cancopy() and islocal(dest) |
@@ -330,13 +323,13 b' def clone(ui, peeropts, source, dest=Non' | |||
|
330 | 323 | os.mkdir(dest) |
|
331 | 324 | else: |
|
332 | 325 | # only clean up directories we create ourselves |
|
333 | dircleanup = DirCleanup(hgdir) | |
|
326 | cleandir = hgdir | |
|
334 | 327 | try: |
|
335 | 328 | destpath = hgdir |
|
336 | 329 | util.makedir(destpath, notindexed=True) |
|
337 | 330 | except OSError, inst: |
|
338 | 331 | if inst.errno == errno.EEXIST: |
|
339 | dircleanup.close() | |
|
332 | cleandir = None | |
|
340 | 333 | raise util.Abort(_("destination '%s' already exists") |
|
341 | 334 | % dest) |
|
342 | 335 | raise |
@@ -364,7 +357,7 b' def clone(ui, peeropts, source, dest=Non' | |||
|
364 | 357 | # only pass ui when no srcrepo |
|
365 | 358 | except OSError, inst: |
|
366 | 359 | if inst.errno == errno.EEXIST: |
|
367 | dircleanup.close() | |
|
360 | cleandir = None | |
|
368 | 361 | raise util.Abort(_("destination '%s' already exists") |
|
369 | 362 | % dest) |
|
370 | 363 | raise |
@@ -384,21 +377,21 b' def clone(ui, peeropts, source, dest=Non' | |||
|
384 | 377 | else: |
|
385 | 378 | raise util.Abort(_("clone from remote to remote not supported")) |
|
386 | 379 | |
|
387 | if dircleanup: | |
|
388 | dircleanup.close() | |
|
380 | cleandir = None | |
|
389 | 381 | |
|
390 | 382 | # clone all bookmarks except divergent ones |
|
391 | 383 | destrepo = destpeer.local() |
|
392 | 384 | if destrepo and srcpeer.capable("pushkey"): |
|
393 | 385 | rb = srcpeer.listkeys('bookmarks') |
|
386 | marks = destrepo._bookmarks | |
|
394 | 387 | for k, n in rb.iteritems(): |
|
395 | 388 | try: |
|
396 | 389 | m = destrepo.lookup(n) |
|
397 | destrepo._bookmarks[k] = m | |
|
390 | marks[k] = m | |
|
398 | 391 | except error.RepoLookupError: |
|
399 | 392 | pass |
|
400 | 393 | if rb: |
|
401 | bookmarks.write(destrepo) | |
|
394 | marks.write() | |
|
402 | 395 | elif srcrepo and destpeer.capable("pushkey"): |
|
403 | 396 | for k, n in srcrepo._bookmarks.iteritems(): |
|
404 | 397 | destpeer.pushkey('bookmarks', k, '', hex(n)) |
@@ -450,8 +443,8 b' def clone(ui, peeropts, source, dest=Non' | |||
|
450 | 443 | return srcpeer, destpeer |
|
451 | 444 | finally: |
|
452 | 445 | release(srclock, destlock) |
|
453 | if dircleanup: | |
|
454 | dircleanup.cleanup() | |
|
446 | if cleandir is not None: | |
|
447 | shutil.rmtree(cleandir, True) | |
|
455 | 448 | if srcpeer is not None: |
|
456 | 449 | srcpeer.close() |
|
457 | 450 |
@@ -140,11 +140,11 b' def staticfile(directory, fname, req):' | |||
|
140 | 140 | try: |
|
141 | 141 | os.stat(path) |
|
142 | 142 | ct = mimetypes.guess_type(path)[0] or "text/plain" |
|
143 | req.respond(HTTP_OK, ct, length = os.path.getsize(path)) | |
|
144 | 143 | fp = open(path, 'rb') |
|
145 | 144 | data = fp.read() |
|
146 | 145 | fp.close() |
|
147 | return data | |
|
146 | req.respond(HTTP_OK, ct, body=data) | |
|
147 | return "" | |
|
148 | 148 | except TypeError: |
|
149 | 149 | raise ErrorResponse(HTTP_SERVER_ERROR, 'illegal filename') |
|
150 | 150 | except OSError, err: |
@@ -24,6 +24,30 b' perms = {' | |||
|
24 | 24 | 'pushkey': 'push', |
|
25 | 25 | } |
|
26 | 26 | |
|
27 | def makebreadcrumb(url): | |
|
28 | '''Return a 'URL breadcrumb' list | |
|
29 | ||
|
30 | A 'URL breadcrumb' is a list of URL-name pairs, | |
|
31 | corresponding to each of the path items on a URL. | |
|
32 | This can be used to create path navigation entries. | |
|
33 | ''' | |
|
34 | if url.endswith('/'): | |
|
35 | url = url[:-1] | |
|
36 | relpath = url | |
|
37 | if relpath.startswith('/'): | |
|
38 | relpath = relpath[1:] | |
|
39 | ||
|
40 | breadcrumb = [] | |
|
41 | urlel = url | |
|
42 | pathitems = [''] + relpath.split('/') | |
|
43 | for pathel in reversed(pathitems): | |
|
44 | if not pathel or not urlel: | |
|
45 | break | |
|
46 | breadcrumb.append({'url': urlel, 'name': pathel}) | |
|
47 | urlel = os.path.dirname(urlel) | |
|
48 | return reversed(breadcrumb) | |
|
49 | ||
|
50 | ||
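
A worked run of makebreadcrumb, re-implemented standalone (posixpath.dirname substituted for os.path.dirname so the sketch behaves the same on any platform):

    import posixpath

    def makebreadcrumb(url):
        url = url.rstrip('/')
        relpath = url.lstrip('/')
        breadcrumb, urlel = [], url
        for pathel in reversed([''] + relpath.split('/')):
            if not pathel or not urlel:
                break
            breadcrumb.append({'url': urlel, 'name': pathel})
            urlel = posixpath.dirname(urlel)
        return list(reversed(breadcrumb))

    print(makebreadcrumb('/hg/repo/sub'))
    # [{'url': '/hg', 'name': 'hg'},
    #  {'url': '/hg/repo', 'name': 'repo'},
    #  {'url': '/hg/repo/sub', 'name': 'sub'}]
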
|
27 | 51 | class hgweb(object): |
|
28 | 52 | def __init__(self, repo, name=None, baseui=None): |
|
29 | 53 | if isinstance(repo, str): |
@@ -35,6 +59,7 b' class hgweb(object):' | |||
|
35 | 59 | else: |
|
36 | 60 | self.repo = repo |
|
37 | 61 | |
|
62 | self.repo = self.repo.filtered('served') | |
|
38 | 63 | self.repo.ui.setconfig('ui', 'report_untrusted', 'off') |
|
39 | 64 | self.repo.ui.setconfig('ui', 'nontty', 'true') |
|
40 | 65 | hook.redirect(True) |
@@ -71,6 +96,7 b' class hgweb(object):' | |||
|
71 | 96 | self.mtime = st.st_mtime |
|
72 | 97 | self.size = st.st_size |
|
73 | 98 | self.repo = hg.repository(self.repo.ui, self.repo.root) |
|
99 | self.repo = self.repo.filtered('served') | |
|
74 | 100 | self.maxchanges = int(self.config("web", "maxchanges", 10)) |
|
75 | 101 | self.stripecount = int(self.config("web", "stripes", 1)) |
|
76 | 102 | self.maxshortchanges = int(self.config("web", "maxshortchanges", |
@@ -134,8 +160,9 b' class hgweb(object):' | |||
|
134 | 160 | '').lower() != '100-continue') or |
|
135 | 161 | req.env.get('X-HgHttp2', '')): |
|
136 | 162 | req.drain() |
|
137 | req.respond(inst, protocol.HGTYPE) | |
|
138 | return '0\n%s\n' % inst.message | |
|
163 | req.respond(inst, protocol.HGTYPE, | |
|
164 | body='0\n%s\n' % inst.message) | |
|
165 | return '' | |
|
139 | 166 | |
|
140 | 167 | # translate user-visible url structure to internal structure |
|
141 | 168 | |
@@ -285,7 +312,8 b' class hgweb(object):' | |||
|
285 | 312 | "header": header, |
|
286 | 313 | "footer": footer, |
|
287 | 314 | "motd": motd, |
|
288 | "sessionvars": sessionvars | |
|
315 | "sessionvars": sessionvars, | |
|
316 | "pathdef": makebreadcrumb(req.url), | |
|
289 | 317 | }) |
|
290 | 318 | return tmpl |
|
291 | 319 |
@@ -12,7 +12,7 b' from mercurial import ui, hg, scmutil, u' | |||
|
12 | 12 | from mercurial import error, encoding |
|
13 | 13 | from common import ErrorResponse, get_mtime, staticfile, paritygen, \ |
|
14 | 14 | get_contact, HTTP_OK, HTTP_NOT_FOUND, HTTP_SERVER_ERROR |
|
15 | from hgweb_mod import hgweb | |
|
15 | from hgweb_mod import hgweb, makebreadcrumb | |
|
16 | 16 | from request import wsgirequest |
|
17 | 17 | import webutil |
|
18 | 18 | |
@@ -310,7 +310,8 b' class hgwebdir(object):' | |||
|
310 | 310 | description_sort="", |
|
311 | 311 | lastchange=d, |
|
312 | 312 | lastchange_sort=d[1]-d[0], |
|
313 | archives=[]) | |
|
313 | archives=[], | |
|
314 | isdirectory=True) | |
|
314 | 315 | |
|
315 | 316 | seendirs.add(name) |
|
316 | 317 | yield row |
@@ -394,6 +395,7 b' class hgwebdir(object):' | |||
|
394 | 395 | self.updatereqenv(req.env) |
|
395 | 396 | |
|
396 | 397 | return tmpl("index", entries=entries, subdir=subdir, |
|
398 | pathdef=makebreadcrumb('/' + subdir), | |
|
397 | 399 | sortcolumn=sortcolumn, descending=descending, |
|
398 | 400 | **dict(sort)) |
|
399 | 401 |
@@ -75,23 +75,24 b' def call(repo, req, cmd):' | |||
|
75 | 75 | p = webproto(req, repo.ui) |
|
76 | 76 | rsp = wireproto.dispatch(repo, p, cmd) |
|
77 | 77 | if isinstance(rsp, str): |
|
78 | req.respond(HTTP_OK, HGTYPE, length=len(rsp)) | |
|
79 | return [rsp] | |
|
78 | req.respond(HTTP_OK, HGTYPE, body=rsp) | |
|
79 | return [] | |
|
80 | 80 | elif isinstance(rsp, wireproto.streamres): |
|
81 | 81 | req.respond(HTTP_OK, HGTYPE) |
|
82 | 82 | return rsp.gen |
|
83 | 83 | elif isinstance(rsp, wireproto.pushres): |
|
84 | 84 | val = p.restore() |
|
85 | req.respond(HTTP_OK, HGTYPE) | |
|
86 | return ['%d\n%s' % (rsp.res, val)] | |
|
85 | rsp = '%d\n%s' % (rsp.res, val) | |
|
86 | req.respond(HTTP_OK, HGTYPE, body=rsp) | |
|
87 | return [] | |
|
87 | 88 | elif isinstance(rsp, wireproto.pusherr): |
|
88 | 89 | # drain the incoming bundle |
|
89 | 90 | req.drain() |
|
90 | 91 | p.restore() |
|
91 | 92 | rsp = '0\n%s\n' % rsp.res |
|
92 | req.respond(HTTP_OK, HGTYPE, length=len(rsp)) | |
|
93 | return [rsp] | |
|
93 | req.respond(HTTP_OK, HGTYPE, body=rsp) | |
|
94 | return [] | |
|
94 | 95 | elif isinstance(rsp, wireproto.ooberror): |
|
95 | 96 | rsp = rsp.message |
|
96 | req.respond(HTTP_OK, HGERRTYPE, length=len(rsp)) | |
|
97 | return [rsp] | |
|
97 | req.respond(HTTP_OK, HGERRTYPE, body=rsp) | |
|
98 | return [] |
@@ -70,19 +70,23 b' class wsgirequest(object):' | |||
|
70 | 70 | for s in util.filechunkiter(self.inp, limit=length): |
|
71 | 71 | pass |
|
72 | 72 | |
|
73 | def respond(self, status, type=None, filename=None): | |
|
73 | def respond(self, status, type, filename=None, body=None): | |
|
74 | 74 | if self._start_response is not None: |
|
75 | ||
|
76 | self.httphdr(type, filename) | |
|
77 | if not self.headers: | |
|
78 | raise RuntimeError("request.write called before headers sent") | |
|
75 | self.headers.append(('Content-Type', type)) | |
|
76 | if filename: | |
|
77 | filename = (filename.split('/')[-1] | |
|
78 | .replace('\\', '\\\\').replace('"', '\\"')) | |
|
79 | self.headers.append(('Content-Disposition', | |
|
80 | 'inline; filename="%s"' % filename)) | |
|
81 | if body is not None: | |
|
82 | self.headers.append(('Content-Length', str(len(body)))) | |
|
79 | 83 | |
|
80 | 84 | for k, v in self.headers: |
|
81 | 85 | if not isinstance(v, str): |
|
82 | raise TypeError('header value must be string: %r' % v) | |
|
86 | raise TypeError('header value must be string: %r' % (v,)) | |
|
83 | 87 | |
|
84 | 88 | if isinstance(status, ErrorResponse): |
|
85 | self.header(status.headers) | |
|
89 | self.headers.extend(status.headers) | |
|
86 | 90 | if status.code == HTTP_NOT_MODIFIED: |
|
87 | 91 | # RFC 2616 Section 10.3.5: 304 Not Modified has cases where |
|
88 | 92 | # it MUST NOT include any headers other than these and no |
@@ -99,13 +103,12 b' class wsgirequest(object):' | |||
|
99 | 103 | self.server_write = self._start_response(status, self.headers) |
|
100 | 104 | self._start_response = None |
|
101 | 105 | self.headers = [] |
|
106 | if body is not None: | |
|
107 | self.write(body) | |
|
108 | self.server_write = None | |
|
102 | 109 | |
|
103 | 110 | def write(self, thing): |
|
104 | if util.safehasattr(thing, "__iter__"): | |
|
105 | for part in thing: | |
|
106 | self.write(part) | |
|
107 | else: | |
|
108 | thing = str(thing) | |
|
111 | if thing: | |
|
109 | 112 | try: |
|
110 | 113 | self.server_write(thing) |
|
111 | 114 | except socket.error, inst: |
@@ -122,22 +125,6 b' class wsgirequest(object):' | |||
|
122 | 125 | def close(self): |
|
123 | 126 | return None |
|
124 | 127 | |
|
125 | def header(self, headers=[('Content-Type','text/html')]): | |
|
126 | self.headers.extend(headers) | |
|
127 | ||
|
128 | def httphdr(self, type=None, filename=None, length=0, headers={}): | |
|
129 | headers = headers.items() | |
|
130 | if type is not None: | |
|
131 | headers.append(('Content-Type', type)) | |
|
132 | if filename: | |
|
133 | filename = (filename.split('/')[-1] | |
|
134 | .replace('\\', '\\\\').replace('"', '\\"')) | |
|
135 | headers.append(('Content-Disposition', | |
|
136 | 'inline; filename="%s"' % filename)) | |
|
137 | if length: | |
|
138 | headers.append(('Content-Length', str(length))) | |
|
139 | self.header(headers) | |
|
140 | ||
|
141 | 128 | def wsgiapplication(app_maker): |
|
142 | 129 | '''For compatibility with old CGI scripts. A plain hgweb() or hgwebdir() |
|
143 | 130 | can and should now be used as a WSGI application.''' |
@@ -129,13 +129,16 b' class _httprequesthandler(BaseHTTPServer' | |||
|
129 | 129 | SocketServer.ForkingMixIn) |
|
130 | 130 | env['wsgi.run_once'] = 0 |
|
131 | 131 | |
|
132 | self.close_connection = True | |
|
133 | 132 | self.saved_status = None |
|
134 | 133 | self.saved_headers = [] |
|
135 | 134 | self.sent_headers = False |
|
136 | 135 | self.length = None |
|
136 | self._chunked = None | |
|
137 | 137 | for chunk in self.server.application(env, self._start_response): |
|
138 | 138 | self._write(chunk) |
|
139 | if not self.sent_headers: | |
|
140 | self.send_headers() | |
|
141 | self._done() | |
|
139 | 142 | |
|
140 | 143 | def send_headers(self): |
|
141 | 144 | if not self.saved_status: |
@@ -144,20 +147,20 b' class _httprequesthandler(BaseHTTPServer' | |||
|
144 | 147 | saved_status = self.saved_status.split(None, 1) |
|
145 | 148 | saved_status[0] = int(saved_status[0]) |
|
146 | 149 | self.send_response(*saved_status) |
|
147 | should_close = True | |
|
150 | self.length = None | |
|
151 | self._chunked = False | |
|
148 | 152 | for h in self.saved_headers: |
|
149 | 153 | self.send_header(*h) |
|
150 | 154 | if h[0].lower() == 'content-length': |
|
151 | should_close = False | |
|
152 | 155 | self.length = int(h[1]) |
|
153 | # The value of the Connection header is a list of case-insensitive | |
|
154 | # tokens separated by commas and optional whitespace. | |
|
155 | if 'close' in [token.strip().lower() for token in | |
|
156 | self.headers.get('connection', '').split(',')]: | |
|
157 | should_close = True | |
|
158 | if should_close: | |
|
159 | self.send_header('Connection', 'close') | |
|
160 | self.close_connection = should_close | |
|
156 | if (self.length is None and | |
|
157 | saved_status[0] != common.HTTP_NOT_MODIFIED): | |
|
158 | self._chunked = (not self.close_connection and | |
|
159 | self.request_version == "HTTP/1.1") | |
|
160 | if self._chunked: | |
|
161 | self.send_header('Transfer-Encoding', 'chunked') | |
|
162 | else: | |
|
163 | self.send_header('Connection', 'close') | |
|
161 | 164 | self.end_headers() |
|
162 | 165 | self.sent_headers = True |
|
163 | 166 | |
@@ -180,9 +183,16 b' class _httprequesthandler(BaseHTTPServer' | |||
|
180 | 183 | raise AssertionError("Content-length header sent, but more " |
|
181 | 184 | "bytes than specified are being written.") |
|
182 | 185 | self.length = self.length - len(data) |
|
186 | elif self._chunked and data: | |
|
187 | data = '%x\r\n%s\r\n' % (len(data), data) | |
|
183 | 188 | self.wfile.write(data) |
|
184 | 189 | self.wfile.flush() |
|
185 | 190 | |
|
191 | def _done(self): | |
|
192 | if self._chunked: | |
|
193 | self.wfile.write('0\r\n\r\n') | |
|
194 | self.wfile.flush() | |
|
195 | ||
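
The framing written by _write and _done is plain HTTP/1.1 chunked encoding: each chunk is its length in hex, CRLF, the payload, CRLF, and a zero-length chunk ends the body. The arithmetic in isolation:

    def chunk(data):
        return '%x\r\n%s\r\n' % (len(data), data)

    body = chunk('hello, ') + chunk('world') + '0\r\n\r\n'
    print(repr(body))  # '7\r\nhello, \r\n5\r\nworld\r\n0\r\n\r\n'
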
|
186 | 196 | class _httprequesthandleropenssl(_httprequesthandler): |
|
187 | 197 | """HTTPS handler based on pyOpenSSL""" |
|
188 | 198 |
@@ -14,6 +14,7 b' from common import paritygen, staticfile' | |||
|
14 | 14 | from common import HTTP_OK, HTTP_FORBIDDEN, HTTP_NOT_FOUND |
|
15 | 15 | from mercurial import graphmod, patch |
|
16 | 16 | from mercurial import help as helpmod |
|
17 | from mercurial import scmutil | |
|
17 | 18 | from mercurial.i18n import _ |
|
18 | 19 | |
|
19 | 20 | # __all__ is populated with the allowed commands. Be sure to add to it if |
@@ -60,8 +61,8 b' def rawfile(web, req, tmpl):' | |||
|
60 | 61 | if mt.startswith('text/'): |
|
61 | 62 | mt += '; charset="%s"' % encoding.encoding |
|
62 | 63 | |
|
63 | req.respond(HTTP_OK, mt, path, len(text)) | |
|
64 | return [text] | |
|
64 | req.respond(HTTP_OK, mt, path, body=text) | |
|
65 | return [] | |
|
65 | 66 | |
|
66 | 67 | def _filerevision(web, tmpl, fctx): |
|
67 | 68 | f = fctx.path() |
@@ -193,34 +194,37 b' def changelog(web, req, tmpl, shortlog=F' | |||
|
193 | 194 | except error.RepoError: |
|
194 | 195 | return _search(web, req, tmpl) # XXX redirect to 404 page? |
|
195 | 196 | |
|
196 | def changelist(limit=0, **map): | |
|
197 | def changelist(latestonly, **map): | |
|
197 | 198 | l = [] # build a list in forward order for efficiency |
|
198 | for i in xrange(start, end): | |
|
199 | revs = [] | |
|
200 | if start < end: | |
|
201 | revs = web.repo.changelog.revs(start, end - 1) | |
|
202 | if latestonly: | |
|
203 | for r in revs: | |
|
204 | pass | |
|
205 | revs = (r,) | |
|
206 | for i in revs: | |
|
199 | 207 | ctx = web.repo[i] |
|
200 | 208 | n = ctx.node() |
|
201 | 209 | showtags = webutil.showtag(web.repo, tmpl, 'changelogtag', n) |
|
202 | 210 | files = webutil.listfilediffs(tmpl, ctx.files(), n, web.maxfiles) |
|
203 | 211 | |
|
204 | l.insert(0, {"parity": parity.next(), | |
|
205 | "author": ctx.user(), | |
|
206 | "parent": webutil.parents(ctx, i - 1), | |
|
207 | "child": webutil.children(ctx, i + 1), | |
|
208 | "changelogtag": showtags, | |
|
209 | "desc": ctx.description(), | |
|
210 | "date": ctx.date(), | |
|
211 | "files": files, | |
|
212 | "rev": i, | |
|
213 | "node": hex(n), | |
|
214 | "tags": webutil.nodetagsdict(web.repo, n), | |
|
215 | "bookmarks": webutil.nodebookmarksdict(web.repo, n), | |
|
216 | "inbranch": webutil.nodeinbranch(web.repo, ctx), | |
|
217 | "branches": webutil.nodebranchdict(web.repo, ctx) | |
|
218 | }) | |
219 | ||
|
220 | if limit > 0: | |
|
221 | l = l[:limit] | |
|
222 | ||
|
223 | for e in l: | |
|
212 | l.append({"parity": parity.next(), | |
|
213 | "author": ctx.user(), | |
|
214 | "parent": webutil.parents(ctx, i - 1), | |
|
215 | "child": webutil.children(ctx, i + 1), | |
|
216 | "changelogtag": showtags, | |
|
217 | "desc": ctx.description(), | |
|
218 | "date": ctx.date(), | |
|
219 | "files": files, | |
|
220 | "rev": i, | |
|
221 | "node": hex(n), | |
|
222 | "tags": webutil.nodetagsdict(web.repo, n), | |
|
223 | "bookmarks": webutil.nodebookmarksdict(web.repo, n), | |
|
224 | "inbranch": webutil.nodeinbranch(web.repo, ctx), | |
|
225 | "branches": webutil.nodebranchdict(web.repo, ctx) | |
|
226 | }) | |
|
227 | for e in reversed(l): | |
|
224 | 228 | yield e |
|
225 | 229 | |
|
226 | 230 | revcount = shortlog and web.maxshortchanges or web.maxchanges |
@@ -241,12 +245,12 b' def changelog(web, req, tmpl, shortlog=F' | |||
|
241 | 245 | pos = end - 1 |
|
242 | 246 | parity = paritygen(web.stripecount, offset=start - end) |
|
243 | 247 | |
|
244 | changenav = webutil.revnavgen(pos, revcount, count, web.repo.changectx) | |
|
248 | changenav = webutil.revnav(web.repo).gen(pos, revcount, count) | |
|
245 | 249 | |
|
246 | 250 | return tmpl(shortlog and 'shortlog' or 'changelog', changenav=changenav, |
|
247 | 251 | node=ctx.hex(), rev=pos, changesets=count, |
|
248 | entries=lambda **x: changelist(limit=0, **x), | |
|
249 | latestentry=lambda **x: changelist(limit=1, **x), | |
|
252 | entries=lambda **x: changelist(latestonly=False, **x), | |
|
253 | latestentry=lambda **x: changelist(latestonly=True, **x), | |
|
250 | 254 | archives=web.archivelist("tip"), revcount=revcount, |
|
251 | 255 | morevars=morevars, lessvars=lessvars) |
|
252 | 256 | |
@@ -255,6 +259,9 b' def shortlog(web, req, tmpl):' | |||
|
255 | 259 | |
|
256 | 260 | def changeset(web, req, tmpl): |
|
257 | 261 | ctx = webutil.changectx(web.repo, req) |
|
262 | basectx = webutil.basechangectx(web.repo, req) | |
|
263 | if basectx is None: | |
|
264 | basectx = ctx.p1() | |
|
258 | 265 | showtags = webutil.showtag(web.repo, tmpl, 'changesettag', ctx.node()) |
|
259 | 266 | showbookmarks = webutil.showbookmark(web.repo, tmpl, 'changesetbookmark', |
|
260 | 267 | ctx.node()) |
@@ -273,10 +280,10 b' def changeset(web, req, tmpl):' | |||
|
273 | 280 | style = req.form['style'][0] |
|
274 | 281 | |
|
275 | 282 | parity = paritygen(web.stripecount) |
|
276 | diffs = webutil.diffs(web.repo, tmpl, ctx, None, parity, style) | |
|
283 | diffs = webutil.diffs(web.repo, tmpl, ctx, basectx, None, parity, style) | |
|
277 | 284 | |
|
278 | 285 | parity = paritygen(web.stripecount) |
|
279 | diffstatgen = webutil.diffstatgen(ctx) | |
|
286 | diffstatgen = webutil.diffstatgen(ctx, basectx) | |
|
280 | 287 | diffstat = webutil.diffstat(tmpl, ctx, diffstatgen, parity) |
|
281 | 288 | |
|
282 | 289 | return tmpl('changeset', |
@@ -285,6 +292,7 b' def changeset(web, req, tmpl):' | |||
|
285 | 292 | node=ctx.hex(), |
|
286 | 293 | parent=webutil.parents(ctx), |
|
287 | 294 | child=webutil.children(ctx), |
|
295 | currentbaseline=basectx.hex(), | |
|
288 | 296 | changesettag=showtags, |
|
289 | 297 | changesetbookmark=showbookmarks, |
|
290 | 298 | changesetbranch=showbranch, |
@@ -397,14 +405,13 b' def tags(web, req, tmpl):' | |||
|
397 | 405 | i = list(reversed(web.repo.tagslist())) |
|
398 | 406 | parity = paritygen(web.stripecount) |
|
399 | 407 | |
|
400 | def entries(notip=False, limit=0, **map): | |
|
401 | count = 0 | |
|
402 | for k, n in i: | |
|
403 | if notip and k == "tip": | |
|
404 | continue | |
|
405 | if limit > 0 and count >= limit: | |
|
406 | continue | |
|
407 | count = count + 1 | |
|
408 | def entries(notip, latestonly, **map): | |
|
409 | t = i | |
|
410 | if notip: | |
|
411 | t = [(k, n) for k, n in i if k != "tip"] | |
|
412 | if latestonly: | |
|
413 | t = t[:1] | |
|
414 | for k, n in t: | |
|
408 | 415 | yield {"parity": parity.next(), |
|
409 | 416 | "tag": k, |
|
410 | 417 | "date": web.repo[n].date(), |
@@ -412,20 +419,20 b' def tags(web, req, tmpl):' | |||
|
412 | 419 | |
|
413 | 420 | return tmpl("tags", |
|
414 | 421 | node=hex(web.repo.changelog.tip()), |
|
415 | entries=lambda **x: entries(False, 0, **x), | |
|
416 | entriesnotip=lambda **x: entries(True, 0, **x), | |
|
417 | latestentry=lambda **x: entries(True, 1, **x)) | |
|
422 | entries=lambda **x: entries(False, False, **x), | |
|
423 | entriesnotip=lambda **x: entries(True, False, **x), | |
|
424 | latestentry=lambda **x: entries(True, True, **x)) | |
|
418 | 425 | |
|
419 | 426 | def bookmarks(web, req, tmpl): |
|
420 | 427 | i = web.repo._bookmarks.items() |
|
421 | 428 | parity = paritygen(web.stripecount) |
|
422 | 429 | |
|
423 | def entries(limit=0, **map): | |
|
424 | count = 0 | |
|
425 | for k, n in sorted(i): | |
|
426 | if limit > 0 and count >= limit: | |
|
427 | continue | |
|
428 | count = count + 1 | |
|
430 | def entries(latestonly, **map): | |
|
431 | if latestonly: | |
|
432 | t = [min(i)] | |
|
433 | else: | |
|
434 | t = sorted(i) | |
|
435 | for k, n in t: | |
|
429 | 436 | yield {"parity": parity.next(), |
|
430 | 437 | "bookmark": k, |
|
431 | 438 | "date": web.repo[n].date(), |
@@ -433,8 +440,8 b' def bookmarks(web, req, tmpl):' | |||
|
433 | 440 | |
|
434 | 441 | return tmpl("bookmarks", |
|
435 | 442 | node=hex(web.repo.changelog.tip()), |
|
436 | entries=lambda **x: entries(limit=0, **x), | |
|
437 | latestentry=lambda **x: entries(limit=1, **x)) | |
|
443 | entries=lambda **x: entries(latestonly=False, **x), | |
|
444 | latestentry=lambda **x: entries(latestonly=True, **x)) | |
|
438 | 445 | |
|
439 | 446 | def branches(web, req, tmpl): |
|
440 | 447 | tips = [] |
@@ -515,7 +522,7 b' def summary(web, req, tmpl):' | |||
|
515 | 522 | n = ctx.node() |
|
516 | 523 | hn = hex(n) |
|
517 | 524 | |
|
518 | l.insert(0, tmpl( | |
|
525 | l.append(tmpl( | |
|
519 | 526 | 'shortlogentry', |
|
520 | 527 | parity=parity.next(), |
|
521 | 528 | author=ctx.user(), |
@@ -528,6 +535,7 b' def summary(web, req, tmpl):' | |||
|
528 | 535 | inbranch=webutil.nodeinbranch(web.repo, ctx), |
|
529 | 536 | branches=webutil.nodebranchdict(web.repo, ctx))) |
|
530 | 537 | |
|
538 | l.reverse() | |
|
531 | 539 | yield l |
|
532 | 540 | |
|
533 | 541 | tip = web.repo['tip'] |
@@ -569,7 +577,7 b' def filediff(web, req, tmpl):' | |||
|
569 | 577 | if 'style' in req.form: |
|
570 | 578 | style = req.form['style'][0] |
|
571 | 579 | |
|
572 | diffs = webutil.diffs(web.repo, tmpl, ctx, [path], parity, style) | |
|
580 | diffs = webutil.diffs(web.repo, tmpl, ctx, None, [path], parity, style) | |
|
573 | 581 | rename = fctx and webutil.renamelink(fctx) or [] |
|
574 | 582 | ctx = fctx and fctx or ctx |
|
575 | 583 | return tmpl("filediff", |
@@ -736,41 +744,42 b' def filelog(web, req, tmpl):' | |||
|
736 | 744 | end = min(count, start + revcount) # last rev on this page |
|
737 | 745 | parity = paritygen(web.stripecount, offset=start - end) |
|
738 | 746 | |
|
739 | def entries(limit=0, **map): | |
|
747 | def entries(latestonly, **map): | |
|
740 | 748 | l = [] |
|
741 | 749 | |
|
742 | 750 | repo = web.repo |
|
743 | for i in xrange(start, end): | |
|
751 | revs = repo.changelog.revs(start, end - 1) | |
|
752 | if latestonly: | |
|
753 | for r in revs: | |
|
754 | pass | |
|
755 | revs = (r,) | |
|
756 | for i in revs: | |
|
744 | 757 | iterfctx = fctx.filectx(i) |
|
745 | 758 | |
|
746 | l.insert(0, {"parity": parity.next(), | |
|
747 | "filerev": i, | |
|
748 | "file": f, | |
|
749 | "node": iterfctx.hex(), | |
|
750 | "author": iterfctx.user(), | |
|
751 | "date": iterfctx.date(), | |
|
752 | "rename": webutil.renamelink(iterfctx), | |
|
753 | "parent": webutil.parents(iterfctx), | |
|
754 | "child": webutil.children(iterfctx), | |
|
755 | "desc": iterfctx.description(), | |
|
756 | "tags": webutil.nodetagsdict(repo, iterfctx.node()), | |
|
757 | "bookmarks": webutil.nodebookmarksdict( | |
|
758 | repo, iterfctx.node()), | |
|
759 | "branch": webutil.nodebranchnodefault(iterfctx), | |
|
760 | "inbranch": webutil.nodeinbranch(repo, iterfctx), | |
|
761 | "branches": webutil.nodebranchdict(repo, iterfctx)}) | |
762 | ||
|
763 | if limit > 0: | |
|
764 | l = l[:limit] | |
|
765 | ||
|
766 | for e in l: | |
|
759 | l.append({"parity": parity.next(), | |
|
760 | "filerev": i, | |
|
761 | "file": f, | |
|
762 | "node": iterfctx.hex(), | |
|
763 | "author": iterfctx.user(), | |
|
764 | "date": iterfctx.date(), | |
|
765 | "rename": webutil.renamelink(iterfctx), | |
|
766 | "parent": webutil.parents(iterfctx), | |
|
767 | "child": webutil.children(iterfctx), | |
|
768 | "desc": iterfctx.description(), | |
|
769 | "tags": webutil.nodetagsdict(repo, iterfctx.node()), | |
|
770 | "bookmarks": webutil.nodebookmarksdict( | |
|
771 | repo, iterfctx.node()), | |
|
772 | "branch": webutil.nodebranchnodefault(iterfctx), | |
|
773 | "inbranch": webutil.nodeinbranch(repo, iterfctx), | |
|
774 | "branches": webutil.nodebranchdict(repo, iterfctx)}) | |
|
775 | for e in reversed(l): | |
|
767 | 776 | yield e |
|
768 | 777 | |
|
769 | nodefunc = lambda x: fctx.filectx(fileid=x) | |
|
770 | nav = webutil.revnavgen(end - 1, revcount, count, nodefunc) | |
|
778 | revnav = webutil.filerevnav(web.repo, fctx.path()) | |
|
779 | nav = revnav.gen(end - 1, revcount, count) | |
|
771 | 780 | return tmpl("filelog", file=f, node=fctx.hex(), nav=nav, |
|
772 | entries=lambda **x: entries(limit=0, **x), | |
|
773 | latestentry=lambda **x: entries(limit=1, **x), | |
|
781 | entries=lambda **x: entries(latestonly=False, **x), | |
|
782 | latestentry=lambda **x: entries(latestonly=True, **x), | |
|
774 | 783 | revcount=revcount, morevars=morevars, lessvars=lessvars) |
|
775 | 784 | |
|
776 | 785 | def archive(web, req, tmpl): |
@@ -795,14 +804,17 b' def archive(web, req, tmpl):' | |||
|
795 | 804 | name = "%s-%s" % (reponame, arch_version) |
|
796 | 805 | mimetype, artype, extension, encoding = web.archive_specs[type_] |
|
797 | 806 | headers = [ |
|
798 | ('Content-Type', mimetype), | |
|
799 | 807 | ('Content-Disposition', 'attachment; filename=%s%s' % (name, extension)) |
|
800 | ] | |
|
808 | ] | |
|
801 | 809 | if encoding: |
|
802 | 810 | headers.append(('Content-Encoding', encoding)) |
|
803 | req.header(headers) | |
|
804 | req.respond(HTTP_OK) | |
|
805 | archival.archive(web.repo, req, cnode, artype, prefix=name) | |
|
811 | req.headers.extend(headers) | |
|
812 | req.respond(HTTP_OK, mimetype) | |
|
813 | ||
|
814 | ctx = webutil.changectx(web.repo, req) | |
|
815 | archival.archive(web.repo, req, cnode, artype, prefix=name, | |
|
816 | matchfn=scmutil.match(ctx, []), | |
|
817 | subrepos=web.configbool("web", "archivesubrepos")) | |
|
806 | 818 | return [] |
|
807 | 819 | |
|
808 | 820 | |
@@ -843,10 +855,13 b' def graph(web, req, tmpl):' | |||
|
843 | 855 | |
|
844 | 856 | uprev = min(max(0, count - 1), rev + revcount) |
|
845 | 857 | downrev = max(0, rev - revcount) |
|
846 | changenav = webutil.revnavgen(pos, revcount, count, web.repo.changectx) | |
|
858 | changenav = webutil.revnav(web.repo).gen(pos, revcount, count) | |
|
847 | 859 | |
|
848 | dag = graphmod.dagwalker(web.repo, range(start, end)[::-1]) | |
|
849 | tree = list(graphmod.colored(dag, web.repo)) | |
|
860 | tree = [] | |
|
861 | if start < end: | |
|
862 | revs = list(web.repo.changelog.revs(end - 1, start)) | |
|
863 | dag = graphmod.dagwalker(web.repo, revs) | |
|
864 | tree = list(graphmod.colored(dag, web.repo)) | |
|
850 | 865 | |
|
851 | 866 | def getcolumns(tree): |
|
852 | 867 | cols = 0 |
@@ -24,46 +24,100 b' def up(p):' | |||
|
24 | 24 | return "/" |
|
25 | 25 | return up + "/" |
|
26 | 26 | |
|
27 | def revnavgen(pos, pagelen, limit, nodefunc): | |
|
28 | def seq(factor, limit=None): | |
|
29 |
|
|
|
30 | yield limit | |
|
31 | if limit >= 20 and limit <= 40: | |
|
32 |
|
|
|
33 | else: | |
|
34 | yield 1 * factor | |
|
35 | yield 3 * factor | |
|
36 |
|
|
|
37 | yield f | |
|
27 | def _navseq(step, firststep=None): | |
|
28 | if firststep: | |
|
29 | yield firststep | |
|
30 | if firststep >= 20 and firststep <= 40: | |
|
31 | firststep = 50 | |
|
32 | yield firststep | |
|
33 | assert step > 0 | |
|
34 | assert firststep > 0 | |
|
35 | while step <= firststep: | |
|
36 | step *= 10 | |
|
37 | while True: | |
|
38 | yield 1 * step | |
|
39 | yield 3 * step | |
|
40 | step *= 10 | |
|
41 | ||
|
42 | class revnav(object): | |
|
43 | ||
|
44 | def __init__(self, repo): | |
|
45 | """Navigation generation object | |
|
38 | 46 |
|
|
39 | navbefore = [] | |
|
40 | navafter = [] | |
|
47 | :repo: repo object we generate nav for | |
|
48 | """ | |
|
49 | # used for hex generation | |
|
50 | self._revlog = repo.changelog | |
|
51 | ||
|
52 | def __nonzero__(self): | |
|
53 | """return True if any revision to navigate over""" | |
|
54 | try: | |
|
55 | self._revlog.node(0) | |
|
56 | return True | |
|
57 | except error.RepoError: | |
|
58 | return False | |
|
59 | ||
|
60 | def hex(self, rev): | |
|
61 | return hex(self._revlog.node(rev)) | |
|
62 | ||
|
63 | def gen(self, pos, pagelen, limit): | |
|
64 | """computes label and revision id for navigation link | |
|
65 | ||
|
66 | :pos: is the revision relative to which we generate navigation. | |
|
67 | :pagelen: the size of each navigation page | |
|
68 | :limit: how far shall we link | |
|
41 | 69 |
|
|
42 | last = 0 | |
|
43 | for f in seq(1, pagelen): | |
|
44 | if f < pagelen or f <= last: | |
|
45 | continue | |
|
46 | if f > limit: | |
|
47 | break | |
|
48 | last = f | |
|
49 | if pos + f < limit: | |
|
50 | navafter.append(("+%d" % f, hex(nodefunc(pos + f).node()))) | |
|
51 | if pos - f >= 0: | |
|
52 | navbefore.insert(0, ("-%d" % f, hex(nodefunc(pos - f).node()))) | |
|
70 | The return is: | |
|
71 | - a single element tuple | |
|
72 | - containing a dictionary with a `before` and `after` key | |
|
73 | - values are generator functions taking arbitrary number of kwargs | |
|
74 | - yield items are dictionaries with `label` and `node` keys | |
|
75 | """ | |
|
76 | if not self: | |
|
77 | # empty repo | |
|
78 | return ({'before': (), 'after': ()},) | |
|
79 | ||
|
80 | targets = [] | |
|
81 | for f in _navseq(1, pagelen): | |
|
82 | if f > limit: | |
|
83 | break | |
|
84 | targets.append(pos + f) | |
|
85 | targets.append(pos - f) | |
|
86 | targets.sort() | |
|
53 | 87 | |
|
54 | navafter.append(("tip", "tip")) | |
|
55 | try: | |
|
56 | navbefore.insert(0, ("(0)", hex(nodefunc('0').node()))) | |
|
57 | except error.RepoError: | |
|
58 | pass | |
|
88 | navbefore = [("(0)", self.hex(0))] | |
|
89 | navafter = [] | |
|
90 | for rev in targets: | |
|
91 | if rev not in self._revlog: | |
|
92 | continue | |
|
93 | if pos < rev < limit: | |
|
94 | navafter.append(("+%d" % f, self.hex(rev))) | |
|
95 | if 0 < rev < pos: | |
|
96 | navbefore.append(("-%d" % f, self.hex(rev))) | |
|
97 | ||
|
98 | ||
|
99 | navafter.append(("tip", "tip")) | |
|
100 | ||
|
101 | data = lambda i: {"label": i[0], "node": i[1]} | |
|
102 | return ({'before': lambda **map: (data(i) for i in navbefore), | |
|
103 | 'after': lambda **map: (data(i) for i in navafter)},) | |
|
59 | 104 | |
|
60 | def gen(l): | |
|
61 | def f(**map): | |
|
62 | for label, node in l: | |
|
63 | yield {"label": label, "node": node} | |
|
64 | return f | |
|
105 | class filerevnav(revnav): | |
|
106 | ||
|
107 | def __init__(self, repo, path): | |
|
108 | """Navigation generation object | |
|
65 | 109 | |

66 | return (dict(before=gen(navbefore), after=gen(navafter)),) | |
|
110 | :repo: repo object we generate nav for | |
|
111 | :path: path of the file we generate nav for | |
|
112 | """ | |
|
113 | # used for iteration | |
|
114 | self._changelog = repo.unfiltered().changelog | |
|
115 | # used for hex generation | |
|
116 | self._revlog = repo.file(path) | |
|
117 | ||
|
118 | def hex(self, rev): | |
|
119 | return hex(self._changelog.node(self._revlog.linkrev(rev))) | |
|
120 | ||
|
67 | 121 | |
|
68 | 122 | def _siblings(siblings=[], hiderev=None): |
|
69 | 123 | siblings = [s for s in siblings if s.node() != nullid] |
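
For orientation, a quick probe of the _navseq generator added above (assuming its definition as shown in the hunk): with a page size of 10 it yields the offsets the navigation links are built from.

    import itertools
    # the page size itself first, then the 1-3-10 geometric ramp above it
    print list(itertools.islice(_navseq(1, 10), 6))
    # -> [10, 100, 300, 1000, 3000, 10000]
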
@@ -140,13 +194,7 b' def cleanpath(repo, path):' | |||
|
140 | 194 | path = path.lstrip('/') |
|
141 | 195 | return scmutil.canonpath(repo.root, '', path) |
|
142 | 196 | |
|
143 | def changectx(repo, req): |

144 | changeid = "tip" | |
|
145 | if 'node' in req.form: | |
|
146 | changeid = req.form['node'][0] | |
|
147 | elif 'manifest' in req.form: | |
|
148 | changeid = req.form['manifest'][0] | |
|
149 | ||
|
197 | def changeidctx(repo, changeid): |
|
150 | 198 | try: |
|
151 | 199 | ctx = repo[changeid] |
|
152 | 200 | except error.RepoError: |
@@ -155,6 +203,28 b' def changectx(repo, req):' | |||
|
155 | 203 | |
|
156 | 204 | return ctx |
|
157 | 205 | |
|
206 | def changectx(repo, req): |
|
207 | changeid = "tip" | |
|
208 | if 'node' in req.form: | |
|
209 | changeid = req.form['node'][0] | |
|
210 |         ipos = changeid.find(':') |
|
211 | if ipos != -1: | |
|
212 | changeid = changeid[(ipos + 1):] | |
|
213 | elif 'manifest' in req.form: | |
|
214 | changeid = req.form['manifest'][0] | |
|
215 | ||
|
216 | return changeidctx(repo, changeid) | |
|
217 | ||
|
218 | def basechangectx(repo, req): | |
|
219 | if 'node' in req.form: | |
|
220 | changeid = req.form['node'][0] | |
|
221 |         ipos = changeid.find(':') |
|
222 | if ipos != -1: | |
|
223 | changeid = changeid[:ipos] | |
|
224 | return changeidctx(repo, changeid) | |
|
225 | ||
|
226 | return None | |
|
227 | ||
|
158 | 228 | def filectx(repo, req): |
|
159 | 229 | if 'file' not in req.form: |
|
160 | 230 | raise ErrorResponse(HTTP_NOT_FOUND, 'file not given') |
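
A minimal sketch of the 'base:head' split rule that changectx() and basechangectx() above both apply (splitrange is a hypothetical helper, not in the patch):

    def splitrange(changeid):
        ipos = changeid.find(':')
        if ipos == -1:
            return None, changeid              # no base part given
        return changeid[:ipos], changeid[ipos + 1:]

So 'abc:def' yields ('abc', 'def'), while a plain 'def' yields (None, 'def'), matching basechangectx() returning None when no base is present.
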
@@ -178,7 +248,7 b' def listfilediffs(tmpl, files, node, max' | |||
|
178 | 248 | if len(files) > max: |
|
179 | 249 | yield tmpl('fileellipses') |
|
180 | 250 | |
|
181 | def diffs(repo, tmpl, ctx, files, parity, style): | |
|
251 | def diffs(repo, tmpl, ctx, basectx, files, parity, style): | |
|
182 | 252 | |
|
183 | 253 | def countgen(): |
|
184 | 254 | start = 1 |
@@ -209,8 +279,11 b' def diffs(repo, tmpl, ctx, files, parity' | |||
|
209 | 279 | m = match.always(repo.root, repo.getcwd()) |
|
210 | 280 | |
|
211 | 281 | diffopts = patch.diffopts(repo.ui, untrusted=True) |
|
212 | parents = ctx.parents() | |
|
213 | node1 = parents and parents[0].node() or nullid | |
|
282 | if basectx is None: | |
|
283 | parents = ctx.parents() | |
|
284 | node1 = parents and parents[0].node() or nullid | |
|
285 | else: | |
|
286 | node1 = basectx.node() | |
|
214 | 287 | node2 = ctx.node() |
|
215 | 288 | |
|
216 | 289 | block = [] |
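
The base-selection rule added above, restated as a small sketch (diffbase is hypothetical; nullid is Mercurial's null node):

    from mercurial.node import nullid

    def diffbase(ctx, basectx=None):
        if basectx is not None:
            return basectx.node()              # explicit base wins
        parents = ctx.parents()
        # first parent, or the null revision for a root changeset
        return parents and parents[0].node() or nullid
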
@@ -274,10 +347,10 b' def compare(tmpl, context, leftlines, ri' | |||
|
274 | 347 | for oc in s.get_grouped_opcodes(n=context): |
|
275 | 348 | yield tmpl('comparisonblock', lines=getblock(oc)) |
|
276 | 349 | |
|
277 | def diffstatgen(ctx): | |
|
350 | def diffstatgen(ctx, basectx): | |
|
278 | 351 | '''Generator function that provides the diffstat data.''' |
|
279 | 352 | |
|
280 | stats = patch.diffstatdata(util.iterlines(ctx.diff())) | |
|
353 | stats = patch.diffstatdata(util.iterlines(ctx.diff(basectx))) | |
|
281 | 354 | maxname, maxtotal, addtotal, removetotal, binary = patch.diffstatsum(stats) |
|
282 | 355 | while True: |
|
283 | 356 | yield stats, maxname, maxtotal, addtotal, removetotal, binary |
@@ -321,7 +394,7 b' class sessionvars(object):' | |||
|
321 | 394 | return sessionvars(copy.copy(self.vars), self.start) |
|
322 | 395 | def __iter__(self): |
|
323 | 396 | separator = self.start |
|
324 | for key, value in self.vars.iteritems(): | |
|
397 | for key, value in sorted(self.vars.iteritems()): | |
|
325 | 398 | yield {'name': key, 'value': str(value), 'separator': separator} |
|
326 | 399 | separator = '&' |
|
327 | 400 |
@@ -7,7 +7,7 b'' | |||
|
7 | 7 | |
|
8 | 8 | from i18n import _ |
|
9 | 9 | import os, sys |
|
10 | import extensions, util | |
|
10 | import extensions, util, demandimport | |
|
11 | 11 | |
|
12 | 12 | def _pythonhook(ui, repo, name, hname, funcname, args, throw): |
|
13 | 13 | '''call python hook. hook is callable object, looked up as |
@@ -35,13 +35,17 b' def _pythonhook(ui, repo, name, hname, f' | |||
|
35 | 35 | sys.path = sys.path[:] + [modpath] |
|
36 | 36 | modname = modfile |
|
37 | 37 | try: |
|
38 | demandimport.disable() | |
|
38 | 39 | obj = __import__(modname) |
|
40 | demandimport.enable() | |
|
39 | 41 | except ImportError: |
|
40 | 42 | e1 = sys.exc_type, sys.exc_value, sys.exc_traceback |
|
41 | 43 | try: |
|
42 | 44 | # extensions are loaded with hgext_ prefix |
|
43 | 45 | obj = __import__("hgext_%s" % modname) |
|
46 | demandimport.enable() | |
|
44 | 47 | except ImportError: |
|
48 | demandimport.enable() | |
|
45 | 49 | e2 = sys.exc_type, sys.exc_value, sys.exc_traceback |
|
46 | 50 | if ui.tracebackflag: |
|
47 | 51 | ui.warn(_('exception from first failed import attempt:\n')) |
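
The disable/enable pairing above must hold on every exit path, which the patch handles by re-enabling in each branch; the same idea as a try/finally sketch (demandimport is Mercurial's lazy importer, passed in to keep this self-contained):

    import contextlib

    @contextlib.contextmanager
    def demandimport_disabled(demandimport):
        demandimport.disable()
        try:
            yield                              # real imports happen here
        finally:
            demandimport.enable()
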
@@ -70,7 +70,7 b' except AttributeError:' | |||
|
70 | 70 | continue |
|
71 | 71 | break |
|
72 | 72 | if not sock: |
|
73 |         raise socket.error, msg |

73 | raise socket.error(msg) | |
|
74 | 74 | return sock |
|
75 | 75 | |
|
76 | 76 | if ssl: |
@@ -46,12 +46,32 b' def ignorepats(lines):' | |||
|
46 | 46 | pat = line |
|
47 | 47 | break |
|
48 | 48 | elif line.startswith(s+':'): |
|
49 | pat = rels + line[len(s)+1:] | |
|
49 | pat = rels + line[len(s) + 1:] | |
|
50 | 50 | break |
|
51 | 51 | patterns.append(pat) |
|
52 | 52 | |
|
53 | 53 | return patterns, warnings |
|
54 | 54 | |
|
55 | def readpats(root, files, warn): | |
|
56 |     '''return a list of (ignore-file-name, list-of-patterns) pairs''' |
|
57 | ||
|
58 | pats = {} | |
|
59 | for f in files: | |
|
60 | if f in pats: | |
|
61 | continue | |
|
62 | try: | |
|
63 | pats[f] = [] | |
|
64 | fp = open(f) | |
|
65 | pats[f], warnings = ignorepats(fp) | |
|
66 | fp.close() | |
|
67 | for warning in warnings: | |
|
68 | warn("%s: %s\n" % (f, warning)) | |
|
69 | except IOError, inst: | |
|
70 | if f != files[0]: | |
|
71 | warn(_("skipping unreadable ignore file '%s': %s\n") % | |
|
72 | (f, inst.strerror)) | |
|
73 | return [(f, pats[f]) for f in files if f in pats] | |
|
74 | ||
|
55 | 75 | def ignore(root, files, warn): |
|
56 | 76 | '''return matcher covering patterns in 'files'. |
|
57 | 77 | |
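
A usage sketch for the new readpats() above (the paths and the warn callback are made up):

    import sys

    def warn(msg):                             # minimal warn callback
        sys.stderr.write(msg)

    for fname, patterns in readpats('/repo', ['/repo/.hgignore'], warn):
        print '%s: %d patterns' % (fname, len(patterns))
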
@@ -72,22 +92,10 b' def ignore(root, files, warn):' | |||
|
72 | 92 | glob:pattern # non-rooted glob |
|
73 | 93 | pattern # pattern of the current default type''' |
|
74 | 94 | |
|
75 | pats = {} | |
|
76 | for f in files: | |
|
77 | try: | |
|
78 | pats[f] = [] | |
|
79 | fp = open(f) | |
|
80 | pats[f], warnings = ignorepats(fp) | |
|
81 | fp.close() | |
|
82 | for warning in warnings: | |
|
83 | warn("%s: %s\n" % (f, warning)) | |
|
84 | except IOError, inst: | |
|
85 | if f != files[0]: | |
|
86 | warn(_("skipping unreadable ignore file '%s': %s\n") % | |
|
87 | (f, inst.strerror)) | |
|
95 | pats = readpats(root, files, warn) | |
|
88 | 96 | |
|
89 | 97 | allpats = [] |
|
90 |     for patlist in pats.values(): |

98 | for f, patlist in pats: | |
|
91 | 99 | allpats.extend(patlist) |
|
92 | 100 | if not allpats: |
|
93 | 101 | return util.never |
@@ -96,7 +104,7 b' def ignore(root, files, warn):' | |||
|
96 | 104 | ignorefunc = match.match(root, '', [], allpats) |
|
97 | 105 | except util.Abort: |
|
98 | 106 | # Re-raise an exception where the src is the right file |
|
99 |         for f, patlist in pats.iteritems(): |

107 | for f, patlist in pats: | |
|
100 | 108 | try: |
|
101 | 109 | match.match(root, '', [], patlist) |
|
102 | 110 | except util.Abort, inst: |
@@ -4,9 +4,9 b'' | |||
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | from node import bin, hex, nullid, nullrev, short |

7 | from node import hex, nullid, short | |
|
8 | 8 | from i18n import _ |
|
9 | import peer, changegroup, subrepo, discovery, pushkey, obsolete | |
|
9 | import peer, changegroup, subrepo, discovery, pushkey, obsolete, repoview | |
|
10 | 10 | import changelog, dirstate, filelog, manifest, context, bookmarks, phases |
|
11 | 11 | import lock, transaction, store, encoding, base85 |
|
12 | 12 | import scmutil, util, extensions, hook, error, revset |
@@ -15,14 +15,49 b' import merge as mergemod' | |||
|
15 | 15 | import tags as tagsmod |
|
16 | 16 | from lock import release |
|
17 | 17 | import weakref, errno, os, time, inspect |
|
18 | import branchmap | |
|
18 | 19 | propertycache = util.propertycache |
|
19 | 20 | filecache = scmutil.filecache |
|
20 | 21 | |
|
21 | class storecache(filecache): |

22 | class repofilecache(filecache): | |
|
23 | """All filecache usage on repo are done for logic that should be unfiltered | |
|
24 | """ | |
|
25 | ||
|
26 | def __get__(self, repo, type=None): | |
|
27 | return super(repofilecache, self).__get__(repo.unfiltered(), type) | |
|
28 | def __set__(self, repo, value): | |
|
29 | return super(repofilecache, self).__set__(repo.unfiltered(), value) | |
|
30 | def __delete__(self, repo): | |
|
31 | return super(repofilecache, self).__delete__(repo.unfiltered()) | |
|
32 | ||
|
33 | class storecache(repofilecache): | |
|
22 | 34 | """filecache for files in the store""" |
|
23 | 35 | def join(self, obj, fname): |
|
24 | 36 | return obj.sjoin(fname) |
|
25 | 37 | |
|
38 | class unfilteredpropertycache(propertycache): | |
|
39 | """propertycache that apply to unfiltered repo only""" | |
|
40 | ||
|
41 | def __get__(self, repo, type=None): | |
|
42 | return super(unfilteredpropertycache, self).__get__(repo.unfiltered()) | |
|
43 | ||
|
44 | class filteredpropertycache(propertycache): | |
|
45 | """propertycache that must take filtering in account""" | |
|
46 | ||
|
47 | def cachevalue(self, obj, value): | |
|
48 | object.__setattr__(obj, self.name, value) | |
|
49 | ||
|
50 | ||
|
51 | def hasunfilteredcache(repo, name): | |
|
52 | """check if an repo and a unfilteredproperty cached value for <name>""" | |
|
53 | return name in vars(repo.unfiltered()) | |
|
54 | ||
|
55 | def unfilteredmethod(orig): | |
|
56 | """decorate method that always need to be run on unfiltered version""" | |
|
57 | def wrapper(repo, *args, **kwargs): | |
|
58 | return orig(repo.unfiltered(), *args, **kwargs) | |
|
59 | return wrapper | |
|
60 | ||
|
26 | 61 | MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle')) |
|
27 | 62 | LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset'])) |
|
28 | 63 | |
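
A standalone sketch of the cache-redirection idea behind repofilecache and unfilteredpropertycache above (names below are illustrative, not from the patch): the descriptor always reads and writes through repo.unfiltered(), so every filtered view shares the single cache stored on the real repo.

    class cachedonunfiltered(object):
        '''non-data descriptor caching its value on the unfiltered repo'''
        def __init__(self, func):
            self.func = func
            self.name = func.__name__
        def __get__(self, repo, type=None):
            if repo is None:
                return self
            repo = repo.unfiltered()           # redirect to the real repo
            val = self.func(repo)
            repo.__dict__[self.name] = val     # instance dict wins next time
            return val
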
@@ -31,7 +66,7 b' class localpeer(peer.peerrepository):' | |||
|
31 | 66 | |
|
32 | 67 | def __init__(self, repo, caps=MODERNCAPS): |
|
33 | 68 | peer.peerrepository.__init__(self) |
|
34 | self._repo = repo | |
|
69 | self._repo = repo.filtered('served') | |
|
35 | 70 | self.ui = repo.ui |
|
36 | 71 | self._caps = repo._restrictcapabilities(caps) |
|
37 | 72 | self.requirements = repo.requirements |
@@ -56,10 +91,10 b' class localpeer(peer.peerrepository):' | |||
|
56 | 91 | return self._repo.lookup(key) |
|
57 | 92 | |
|
58 | 93 | def branchmap(self): |
|
59 |         return discovery.visiblebranchmap(self._repo) |

94 | return self._repo.branchmap() | |
|
60 | 95 | |
|
61 | 96 | def heads(self): |
|
62 |         return discovery.visibleheads(self._repo) |

97 | return self._repo.heads() | |
|
63 | 98 | |
|
64 | 99 | def known(self, nodes): |
|
65 | 100 | return self._repo.known(nodes) |
@@ -112,6 +147,7 b' class localrepository(object):' | |||
|
112 | 147 | 'dotencode')) |
|
113 | 148 | openerreqs = set(('revlogv1', 'generaldelta')) |
|
114 | 149 | requirements = ['revlogv1'] |
|
150 | filtername = None | |
|
115 | 151 | |
|
116 | 152 | def _baserequirements(self, create): |
|
117 | 153 | return self.requirements[:] |
@@ -193,8 +229,7 b' class localrepository(object):' | |||
|
193 | 229 | self._writerequirements() |
|
194 | 230 | |
|
195 | 231 | |
|
196 |         self._branchcache = None |

197 | self._branchcachetip = None | |
|
232 | self._branchcaches = {} | |
|
198 | 233 | self.filterpats = {} |
|
199 | 234 | self._datafilters = {} |
|
200 | 235 | self._transref = self._lockref = self._wlockref = None |
@@ -205,6 +240,15 b' class localrepository(object):' | |||
|
205 | 240 | # Maps a property name to its util.filecacheentry |
|
206 | 241 | self._filecache = {} |
|
207 | 242 | |
|
243 |         # hold sets of revisions to be filtered |
|
244 | # should be cleared when something might have changed the filter value: | |
|
245 | # - new changesets, | |
|
246 | # - phase change, | |
|
247 | # - new obsolescence marker, | |
|
248 | # - working directory parent change, | |
|
249 | # - bookmark changes | |
|
250 | self.filteredrevcache = {} | |
|
251 | ||
|
208 | 252 | def close(self): |
|
209 | 253 | pass |
|
210 | 254 | |
@@ -218,7 +262,7 b' class localrepository(object):' | |||
|
218 | 262 | |
|
219 | 263 | def _writerequirements(self): |
|
220 | 264 | reqfile = self.opener("requires", "w") |
|
221 | for r in self.requirements: | |
|
265 | for r in sorted(self.requirements): | |
|
222 | 266 | reqfile.write("%s\n" % r) |
|
223 | 267 | reqfile.close() |
|
224 | 268 | |
@@ -263,17 +307,28 b' class localrepository(object):' | |||
|
263 | 307 | def peer(self): |
|
264 | 308 | return localpeer(self) # not cached to avoid reference cycle |
|
265 | 309 | |
|
266 | @filecache('bookmarks') | |
|
310 | def unfiltered(self): | |
|
311 | """Return unfiltered version of the repository | |
|
312 | ||
|
313 |         Intended to be overwritten by filtered repo.""" |
|
314 | return self | |
|
315 | ||
|
316 | def filtered(self, name): | |
|
317 | """Return a filtered version of a repository""" | |
|
318 | # build a new class with the mixin and the current class | |
|
319 |         # (possibly a subclass of the repo) |
|
320 | class proxycls(repoview.repoview, self.unfiltered().__class__): | |
|
321 | pass | |
|
322 | return proxycls(self, name) | |
|
323 | ||
|
324 | @repofilecache('bookmarks') | |
|
267 | 325 | def _bookmarks(self): |
|
268 |         return bookmarks.read(self) |

326 | return bookmarks.bmstore(self) | |
|
269 | 327 | |
|
270 | @filecache('bookmarks.current') | |
|
328 | @repofilecache('bookmarks.current') | |
|
271 | 329 | def _bookmarkcurrent(self): |
|
272 | 330 | return bookmarks.readcurrent(self) |
|
273 | 331 | |
|
274 | def _writebookmarks(self, marks): | |
|
275 | bookmarks.write(self) | |
|
276 | ||
|
277 | 332 | def bookmarkheads(self, bookmark): |
|
278 | 333 | name = bookmark.split('@', 1)[0] |
|
279 | 334 | heads = [] |
@@ -295,27 +350,6 b' class localrepository(object):' | |||
|
295 | 350 | self.ui.warn(msg % len(list(store))) |
|
296 | 351 | return store |
|
297 | 352 | |
|
298 | @propertycache | |
|
299 | def hiddenrevs(self): | |
|
300 | """hiddenrevs: revs that should be hidden by command and tools | |
|
301 | ||
|
302 | This set is carried on the repo to ease initialization and lazy | |
|
303 | loading; it'll probably move back to changelog for efficiency and | |
|
304 | consistency reasons. | |
|
305 | ||
|
306 |         Note that the hiddenrevs will need invalidation when |
|
307 |         - a new changeset is added (possibly unstable above extinct) |
|
308 | - a new obsolete marker is added (possible new extinct changeset) | |
|
309 | ||
|
310 | hidden changesets cannot have non-hidden descendants | |
|
311 | """ | |
|
312 | hidden = set() | |
|
313 | if self.obsstore: | |
|
314 | ### hide extinct changeset that are not accessible by any mean | |
|
315 | hiddenquery = 'extinct() - ::(. + bookmark())' | |
|
316 | hidden.update(self.revs(hiddenquery)) | |
|
317 | return hidden | |
|
318 | ||
|
319 | 353 | @storecache('00changelog.i') |
|
320 | 354 | def changelog(self): |
|
321 | 355 | c = changelog.changelog(self.sopener) |
@@ -329,7 +363,7 b' class localrepository(object):' | |||
|
329 | 363 | def manifest(self): |
|
330 | 364 | return manifest.manifest(self.sopener) |
|
331 | 365 | |
|
332 | @filecache('dirstate') | |
|
366 | @repofilecache('dirstate') | |
|
333 | 367 | def dirstate(self): |
|
334 | 368 | warned = [0] |
|
335 | 369 | def validate(node): |
@@ -385,6 +419,7 b' class localrepository(object):' | |||
|
385 | 419 | def hook(self, name, throw=False, **args): |
|
386 | 420 | return hook.hook(self.ui, self, name, throw, **args) |
|
387 | 421 | |
|
422 | @unfilteredmethod | |
|
388 | 423 | def _tag(self, names, node, message, local, user, date, extra={}): |
|
389 | 424 | if isinstance(names, str): |
|
390 | 425 | names = (names,) |
@@ -482,7 +517,7 b' class localrepository(object):' | |||
|
482 | 517 | self.tags() # instantiate the cache |
|
483 | 518 | self._tag(names, node, message, local, user, date) |
|
484 | 519 | |
|
485 | @propertycache | |
|
520 | @filteredpropertycache | |
|
486 | 521 | def _tagscache(self): |
|
487 | 522 | '''Returns a tagscache object that contains various tags related |
|
488 | 523 | caches.''' |
@@ -594,43 +629,10 b' class localrepository(object):' | |||
|
594 | 629 | marks.append(bookmark) |
|
595 | 630 | return sorted(marks) |
|
596 | 631 | |
|
597 | def _branchtags(self, partial, lrev): | |
|
598 | # TODO: rename this function? | |
|
599 | tiprev = len(self) - 1 | |
|
600 | if lrev != tiprev: | |
|
601 | ctxgen = (self[r] for r in self.changelog.revs(lrev + 1, tiprev)) | |
|
602 | self._updatebranchcache(partial, ctxgen) | |
|
603 | self._writebranchcache(partial, self.changelog.tip(), tiprev) | |
|
604 | ||
|
605 | return partial | |
|
606 | ||
|
607 | def updatebranchcache(self): | |
|
608 | tip = self.changelog.tip() | |
|
609 | if self._branchcache is not None and self._branchcachetip == tip: | |
|
610 | return | |
|
611 | ||
|
612 | oldtip = self._branchcachetip | |
|
613 | self._branchcachetip = tip | |
|
614 | if oldtip is None or oldtip not in self.changelog.nodemap: | |
|
615 | partial, last, lrev = self._readbranchcache() | |
|
616 | else: | |
|
617 | lrev = self.changelog.rev(oldtip) | |
|
618 | partial = self._branchcache | |
|
619 | ||
|
620 | self._branchtags(partial, lrev) | |
|
621 | # this private cache holds all heads (not just the branch tips) | |
|
622 | self._branchcache = partial | |
|
623 | ||
|
624 | 632 | def branchmap(self): |
|
625 | 633 | '''returns a dictionary {branch: [branchheads]}''' |
|
626 | if self.changelog.filteredrevs: | |
|
627 |             # some changesets are excluded; we can't use the cache |
|
628 | branchmap = {} | |
|
629 | self._updatebranchcache(branchmap, (self[r] for r in self)) | |
|
630 | return branchmap | |
|
631 | else: | |
|
632 | self.updatebranchcache() | |
|
633 | return self._branchcache | |
|
634 | branchmap.updatecache(self) | |
|
635 | return self._branchcaches[self.filtername] | |
|
634 | 636 | |
|
635 | 637 | |
|
636 | 638 | def _branchtip(self, heads): |
@@ -656,109 +658,6 b' class localrepository(object):' | |||
|
656 | 658 | bt[bn] = self._branchtip(heads) |
|
657 | 659 | return bt |
|
658 | 660 | |
|
659 | def _readbranchcache(self): | |
|
660 | partial = {} | |
|
661 | try: | |
|
662 | f = self.opener("cache/branchheads") | |
|
663 | lines = f.read().split('\n') | |
|
664 | f.close() | |
|
665 | except (IOError, OSError): | |
|
666 | return {}, nullid, nullrev | |
|
667 | ||
|
668 | try: | |
|
669 | last, lrev = lines.pop(0).split(" ", 1) | |
|
670 | last, lrev = bin(last), int(lrev) | |
|
671 | if lrev >= len(self) or self[lrev].node() != last: | |
|
672 | # invalidate the cache | |
|
673 | raise ValueError('invalidating branch cache (tip differs)') | |
|
674 | for l in lines: | |
|
675 | if not l: | |
|
676 | continue | |
|
677 | node, label = l.split(" ", 1) | |
|
678 | label = encoding.tolocal(label.strip()) | |
|
679 | if not node in self: | |
|
680 | raise ValueError('invalidating branch cache because node '+ | |
|
681 | '%s does not exist' % node) | |
|
682 | partial.setdefault(label, []).append(bin(node)) | |
|
683 | except KeyboardInterrupt: | |
|
684 | raise | |
|
685 | except Exception, inst: | |
|
686 | if self.ui.debugflag: | |
|
687 | self.ui.warn(str(inst), '\n') | |
|
688 | partial, last, lrev = {}, nullid, nullrev | |
|
689 | return partial, last, lrev | |
|
690 | ||
|
691 | def _writebranchcache(self, branches, tip, tiprev): | |
|
692 | try: | |
|
693 | f = self.opener("cache/branchheads", "w", atomictemp=True) | |
|
694 | f.write("%s %s\n" % (hex(tip), tiprev)) | |
|
695 | for label, nodes in branches.iteritems(): | |
|
696 | for node in nodes: | |
|
697 | f.write("%s %s\n" % (hex(node), encoding.fromlocal(label))) | |
|
698 | f.close() | |
|
699 | except (IOError, OSError): | |
|
700 | pass | |
|
701 | ||
|
702 | def _updatebranchcache(self, partial, ctxgen): | |
|
703 | """Given a branchhead cache, partial, that may have extra nodes or be | |
|
704 | missing heads, and a generator of nodes that are at least a superset of | |
|
705 | heads missing, this function updates partial to be correct. | |
|
706 | """ | |
|
707 | # collect new branch entries | |
|
708 | newbranches = {} | |
|
709 | for c in ctxgen: | |
|
710 | newbranches.setdefault(c.branch(), []).append(c.node()) | |
|
711 | # if older branchheads are reachable from new ones, they aren't | |
|
712 | # really branchheads. Note checking parents is insufficient: | |
|
713 | # 1 (branch a) -> 2 (branch b) -> 3 (branch a) | |
|
714 | for branch, newnodes in newbranches.iteritems(): | |
|
715 | bheads = partial.setdefault(branch, []) | |
|
716 | # Remove candidate heads that no longer are in the repo (e.g., as | |
|
717 | # the result of a strip that just happened). Avoid using 'node in | |
|
718 | # self' here because that dives down into branchcache code somewhat | |
|
719 | # recursively. | |
|
720 | bheadrevs = [self.changelog.rev(node) for node in bheads | |
|
721 | if self.changelog.hasnode(node)] | |
|
722 | newheadrevs = [self.changelog.rev(node) for node in newnodes | |
|
723 | if self.changelog.hasnode(node)] | |
|
724 | ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs) | |
|
725 | # Remove duplicates - nodes that are in newheadrevs and are already | |
|
726 | # in bheadrevs. This can happen if you strip a node whose parent | |
|
727 | # was already a head (because they're on different branches). | |
|
728 | bheadrevs = sorted(set(bheadrevs).union(newheadrevs)) | |
|
729 | ||
|
730 | # Starting from tip means fewer passes over reachable. If we know | |
|
731 | # the new candidates are not ancestors of existing heads, we don't | |
|
732 | # have to examine ancestors of existing heads | |
|
733 | if ctxisnew: | |
|
734 | iterrevs = sorted(newheadrevs) | |
|
735 | else: | |
|
736 | iterrevs = list(bheadrevs) | |
|
737 | ||
|
738 | # This loop prunes out two kinds of heads - heads that are | |
|
739 | # superseded by a head in newheadrevs, and newheadrevs that are not | |
|
740 | # heads because an existing head is their descendant. | |
|
741 | while iterrevs: | |
|
742 | latest = iterrevs.pop() | |
|
743 | if latest not in bheadrevs: | |
|
744 | continue | |
|
745 | ancestors = set(self.changelog.ancestors([latest], | |
|
746 | bheadrevs[0])) | |
|
747 | if ancestors: | |
|
748 | bheadrevs = [b for b in bheadrevs if b not in ancestors] | |
|
749 | partial[branch] = [self.changelog.node(rev) for rev in bheadrevs] | |
|
750 | ||
|
751 | # There may be branches that cease to exist when the last commit in the | |
|
752 | # branch was stripped. This code filters them out. Note that the | |
|
753 | # branch that ceased to exist may not be in newbranches because | |
|
754 | # newbranches is the set of candidate heads, which when you strip the | |
|
755 | # last commit in a branch will be the parent branch. | |
|
756 | for branch in partial.keys(): | |
|
757 | nodes = [head for head in partial[branch] | |
|
758 | if self.changelog.hasnode(head)] | |
|
759 | if not nodes: | |
|
760 | del partial[branch] | |
|
761 | ||
|
762 | 661 | def lookup(self, key): |
|
763 | 662 | return self[key].node() |
|
764 | 663 | |
@@ -865,11 +764,11 b' class localrepository(object):' | |||
|
865 | 764 | |
|
866 | 765 | return data |
|
867 | 766 | |
|
868 | @propertycache | |
|
767 | @unfilteredpropertycache | |
|
869 | 768 | def _encodefilterpats(self): |
|
870 | 769 | return self._loadfilter('encode') |
|
871 | 770 | |
|
872 | @propertycache | |
|
771 | @unfilteredpropertycache | |
|
873 | 772 | def _decodefilterpats(self): |
|
874 | 773 | return self._loadfilter('decode') |
|
875 | 774 | |
@@ -964,6 +863,7 b' class localrepository(object):' | |||
|
964 | 863 | finally: |
|
965 | 864 | release(lock, wlock) |
|
966 | 865 | |
|
866 | @unfilteredmethod # Until we get smarter cache management | |
|
967 | 867 | def _rollback(self, dryrun, force): |
|
968 | 868 | ui = self.ui |
|
969 | 869 | try: |
@@ -995,6 +895,7 b' class localrepository(object):' | |||
|
995 | 895 | return 0 |
|
996 | 896 | |
|
997 | 897 | parents = self.dirstate.parents() |
|
898 | self.destroying() | |
|
998 | 899 | transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn) |
|
999 | 900 | if os.path.exists(self.join('undo.bookmarks')): |
|
1000 | 901 | util.rename(self.join('undo.bookmarks'), |
@@ -1004,9 +905,6 b' class localrepository(object):' | |||
|
1004 | 905 | self.sjoin('phaseroots')) |
|
1005 | 906 | self.invalidate() |
|
1006 | 907 | |
|
1007 | # Discard all cache entries to force reloading everything. | |
|
1008 | self._filecache.clear() | |
|
1009 | ||
|
1010 | 908 | parentgone = (parents[0] not in self.changelog.nodemap or |
|
1011 | 909 | parents[1] not in self.changelog.nodemap) |
|
1012 | 910 | if parentgone: |
@@ -1034,16 +932,16 b' class localrepository(object):' | |||
|
1034 | 932 | return 0 |
|
1035 | 933 | |
|
1036 | 934 | def invalidatecaches(self): |
|
1037 | def delcache(name): | |
|
1038 | try: | |
|
1039 |                 delattr(self, name) |

1040 | except AttributeError: | |
|
1041 | pass | |
|
935 | ||
|
936 | if '_tagscache' in vars(self): | |
|
937 | # can't use delattr on proxy | |
|
938 | del self.__dict__['_tagscache'] | |
|
1042 | 939 | |
|
1043 | delcache('_tagscache') | |
|
940 | self.unfiltered()._branchcaches.clear() | |
|
941 | self.invalidatevolatilesets() | |
|
1044 | 942 | |
|
1045 | self._branchcache = None # in UTF-8 | |
|
1046 | self._branchcachetip = None | |
|
943 | def invalidatevolatilesets(self): | |
|
944 | self.filteredrevcache.clear() | |
|
1047 | 945 | obsolete.clearobscaches(self) |
|
1048 | 946 | |
|
1049 | 947 | def invalidatedirstate(self): |
@@ -1055,22 +953,23 b' class localrepository(object):' | |||
|
1055 | 953 | rereads the dirstate. Use dirstate.invalidate() if you want to |
|
1056 | 954 | explicitly read the dirstate again (i.e. restoring it to a previous |
|
1057 | 955 | known good state).''' |
|
1058 | if 'dirstate' in self.__dict__: | |
|
956 | if hasunfilteredcache(self, 'dirstate'): | |
|
1059 | 957 | for k in self.dirstate._filecache: |
|
1060 | 958 | try: |
|
1061 | 959 | delattr(self.dirstate, k) |
|
1062 | 960 | except AttributeError: |
|
1063 | 961 | pass |
|
1064 | delattr(self, 'dirstate') | |
|
962 | delattr(self.unfiltered(), 'dirstate') | |
|
1065 | 963 | |
|
1066 | 964 | def invalidate(self): |
|
965 | unfiltered = self.unfiltered() # all filecaches are stored on unfiltered | |
|
1067 | 966 | for k in self._filecache: |
|
1068 | 967 | # dirstate is invalidated separately in invalidatedirstate() |
|
1069 | 968 | if k == 'dirstate': |
|
1070 | 969 | continue |
|
1071 | 970 | |
|
1072 | 971 | try: |
|
1073 |                 delattr(self, k) |

972 | delattr(unfiltered, k) | |
|
1074 | 973 | except AttributeError: |
|
1075 | 974 | pass |
|
1076 | 975 | self.invalidatecaches() |
@@ -1111,10 +1010,10 b' class localrepository(object):' | |||
|
1111 | 1010 | |
|
1112 | 1011 | def unlock(): |
|
1113 | 1012 | self.store.write() |
|
1114 |             if '_phasecache' in vars(self): |

1013 | if hasunfilteredcache(self, '_phasecache'): | |
|
1115 | 1014 | self._phasecache.write() |
|
1116 | 1015 | for k, ce in self._filecache.items(): |
|
1117 | if k == 'dirstate': | |
|
1016 | if k == 'dirstate' or k not in self.__dict__: | |
|
1118 | 1017 | continue |
|
1119 | 1018 | ce.refresh() |
|
1120 | 1019 | |
@@ -1134,9 +1033,7 b' class localrepository(object):' | |||
|
1134 | 1033 | |
|
1135 | 1034 | def unlock(): |
|
1136 | 1035 | self.dirstate.write() |
|
1137 |             ce = self._filecache.get('dirstate') |

1138 | if ce: | |
|
1139 | ce.refresh() | |
|
1036 | self._filecache['dirstate'].refresh() | |
|
1140 | 1037 | |
|
1141 | 1038 | l = self._lock(self.join("wlock"), wait, unlock, |
|
1142 | 1039 | self.invalidatedirstate, _('working directory of %s') % |
@@ -1224,6 +1121,7 b' class localrepository(object):' | |||
|
1224 | 1121 | |
|
1225 | 1122 | return fparent1 |
|
1226 | 1123 | |
|
1124 | @unfilteredmethod | |
|
1227 | 1125 | def commit(self, text="", user=None, date=None, match=None, force=False, |
|
1228 | 1126 | editor=False, extra={}): |
|
1229 | 1127 | """Add a new revision to current repository. |
@@ -1394,6 +1292,7 b' class localrepository(object):' | |||
|
1394 | 1292 | self._afterlock(commithook) |
|
1395 | 1293 | return ret |
|
1396 | 1294 | |
|
1295 | @unfilteredmethod | |
|
1397 | 1296 | def commitctx(self, ctx, error=False): |
|
1398 | 1297 | """Add a new revision to current repository. |
|
1399 | 1298 | Revision information is passed via the context argument. |
@@ -1468,14 +1367,33 b' class localrepository(object):' | |||
|
1468 | 1367 | # if minimal phase was 0 we don't need to retract anything |
|
1469 | 1368 | phases.retractboundary(self, targetphase, [n]) |
|
1470 | 1369 | tr.close() |
|
1471 | self.updatebranchcache() | |
|
1370 | branchmap.updatecache(self.filtered('served')) | |
|
1472 | 1371 | return n |
|
1473 | 1372 | finally: |
|
1474 | 1373 | if tr: |
|
1475 | 1374 | tr.release() |
|
1476 | 1375 | lock.release() |
|
1477 | 1376 | |
|
1478 | def destroyed(self, newheadnodes=None): | |
|
1377 | @unfilteredmethod | |
|
1378 | def destroying(self): | |
|
1379 | '''Inform the repository that nodes are about to be destroyed. | |
|
1380 | Intended for use by strip and rollback, so there's a common | |
|
1381 | place for anything that has to be done before destroying history. | |
|
1382 | ||
|
1383 | This is mostly useful for saving state that is in memory and waiting | |
|
1384 | to be flushed when the current lock is released. Because a call to | |
|
1385 | destroyed is imminent, the repo will be invalidated causing those | |
|
1386 | changes to stay in memory (waiting for the next unlock), or vanish | |
|
1387 | completely. | |
|
1388 | ''' | |
|
1389 | # When using the same lock to commit and strip, the phasecache is left | |
|
1390 | # dirty after committing. Then when we strip, the repo is invalidated, | |
|
1391 | # causing those changes to disappear. | |
|
1392 | if '_phasecache' in vars(self): | |
|
1393 | self._phasecache.write() | |
|
1394 | ||
|
1395 | @unfilteredmethod | |
|
1396 | def destroyed(self): | |
|
1479 | 1397 | '''Inform the repository that nodes have been destroyed. |
|
1480 | 1398 | Intended for use by strip and rollback, so there's a common |
|
1481 | 1399 | place for anything that has to be done after destroying history. |
@@ -1486,16 +1404,22 b' class localrepository(object):' | |||
|
1486 | 1404 | code to update the branchheads cache, rather than having future code |
|
1487 | 1405 | decide it's invalid and regenerating it from scratch. |
|
1488 | 1406 | ''' |
|
1489 | # If we have info, newheadnodes, on how to update the branch cache, do | |
|
1490 | # it, Otherwise, since nodes were destroyed, the cache is stale and this | |
|
1491 | # will be caught the next time it is read. | |
|
1492 | if newheadnodes: | |
|
1493 | tiprev = len(self) - 1 | |
|
1494 | ctxgen = (self[node] for node in newheadnodes | |
|
1495 | if self.changelog.hasnode(node)) | |
|
1496 | self._updatebranchcache(self._branchcache, ctxgen) | |
|
1497 | self._writebranchcache(self._branchcache, self.changelog.tip(), | |
|
1498 | tiprev) | |
|
1407 | # When one tries to: | |
|
1408 | # 1) destroy nodes thus calling this method (e.g. strip) | |
|
1409 | # 2) use phasecache somewhere (e.g. commit) | |
|
1410 | # | |
|
1411 | # then 2) will fail because the phasecache contains nodes that were | |
|
1412 | # removed. We can either remove phasecache from the filecache, | |
|
1413 | # causing it to reload next time it is accessed, or simply filter | |
|
1414 | # the removed nodes now and write the updated cache. | |
|
1415 | if '_phasecache' in self._filecache: | |
|
1416 | self._phasecache.filterunknown(self) | |
|
1417 | self._phasecache.write() | |
|
1418 | ||
|
1419 | # update the 'served' branch cache to help read only server process | |
|
1420 | # Thanks to branchcach collaboration this is done from the nearest | |
|
1421 | # filtered subset and it is expected to be fast. | |
|
1422 | branchmap.updatecache(self.filtered('served')) | |
|
1499 | 1423 | |
|
1500 | 1424 | # Ensure the persistent tag cache is updated. Doing it now |
|
1501 | 1425 | # means that the tag cache only has to worry about destroyed |
@@ -1507,10 +1431,7 b' class localrepository(object):' | |||
|
1507 | 1431 | # head, refresh the tag cache, then immediately add a new head. |
|
1508 | 1432 | # But I think doing it this way is necessary for the "instant |
|
1509 | 1433 | # tag cache retrieval" case to work. |
|
1510 |         self.invalidatecaches() |

1511 | ||
|
1512 | # Discard all cache entries to force reloading everything. | |
|
1513 | self._filecache.clear() | |
|
1434 | self.invalidate() | |
|
1514 | 1435 | |
|
1515 | 1436 | def walk(self, match, node=None): |
|
1516 | 1437 | ''' |
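
A sketch of the calling convention implied by the destroying/destroyed docstrings above (the removal step is hypothetical, not from the patch):

    def striprevs(repo, nodes):
        repo.destroying()                  # flush pending in-memory state
        _removechangesets(repo, nodes)     # hypothetical destructive step
        repo.destroyed()                   # then let caches repair themselves
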
@@ -1568,7 +1489,7 b' class localrepository(object):' | |||
|
1568 | 1489 | if working: # we need to scan the working dir |
|
1569 | 1490 | subrepos = [] |
|
1570 | 1491 | if '.hgsub' in self.dirstate: |
|
1571 |                 subrepos = ctx2.substate.keys() |

1492 | subrepos = sorted(ctx2.substate) | |
|
1572 | 1493 | s = self.dirstate.status(match, subrepos, listignored, |
|
1573 | 1494 | listclean, listunknown) |
|
1574 | 1495 | cmp, modified, added, removed, deleted, unknown, ignored, clean = s |
@@ -1806,6 +1727,7 b' class localrepository(object):' | |||
|
1806 | 1727 | if key.startswith('dump'): |
|
1807 | 1728 | data = base85.b85decode(remoteobs[key]) |
|
1808 | 1729 | self.obsstore.mergemarkers(tr, data) |
|
1730 | self.invalidatevolatilesets() | |
|
1809 | 1731 | if tr is not None: |
|
1810 | 1732 | tr.close() |
|
1811 | 1733 | finally: |
@@ -1841,6 +1763,7 b' class localrepository(object):' | |||
|
1841 | 1763 | |
|
1842 | 1764 | if not remote.canpush(): |
|
1843 | 1765 | raise util.Abort(_("destination does not support push")) |
|
1766 | unfi = self.unfiltered() | |
|
1844 | 1767 | # get local lock as we might write phase data |
|
1845 | 1768 | locallock = self.lock() |
|
1846 | 1769 | try: |
@@ -1852,40 +1775,43 b' class localrepository(object):' | |||
|
1852 | 1775 | try: |
|
1853 | 1776 | # discovery |
|
1854 | 1777 | fci = discovery.findcommonincoming |
|
1855 |             commoninc = fci(self, remote, force=force) |

1778 | commoninc = fci(unfi, remote, force=force) | |
|
1856 | 1779 | common, inc, remoteheads = commoninc |
|
1857 | 1780 | fco = discovery.findcommonoutgoing |
|
1858 |             outgoing = fco(self, remote, onlyheads=revs, |

1781 | outgoing = fco(unfi, remote, onlyheads=revs, | |
|
1859 | 1782 | commoninc=commoninc, force=force) |
|
1860 | 1783 | |
|
1861 | 1784 | |
|
1862 | 1785 | if not outgoing.missing: |
|
1863 | 1786 | # nothing to push |
|
1864 |                 scmutil.nochangesfound(self.ui, self, outgoing.excluded) |

1787 | scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded) | |
|
1865 | 1788 | ret = None |
|
1866 | 1789 | else: |
|
1867 | 1790 | # something to push |
|
1868 | 1791 | if not force: |
|
1869 | 1792 | # if self.obsstore == False --> no obsolete |
|
1870 | 1793 | # then, save the iteration |
|
1871 |                     if self.obsstore: |

1794 | if unfi.obsstore: | |
|
1872 | 1795 |                         # these messages are here for 80 char limit reasons |
|
1873 | 1796 | mso = _("push includes obsolete changeset: %s!") |
|
1874 |                         msu = _("push includes unstable changeset: %s!") |

1875 | msb = _("push includes bumped changeset: %s!") | |
|
1797 | mst = "push includes %s changeset: %s!" | |
|
1798 | # plain versions for i18n tool to detect them | |
|
1799 | _("push includes unstable changeset: %s!") | |
|
1800 | _("push includes bumped changeset: %s!") | |
|
1801 | _("push includes divergent changeset: %s!") | |
|
1876 | 1802 | # If we are to push if there is at least one |
|
1877 | 1803 | # obsolete or unstable changeset in missing, at |
|
1878 | 1804 | # least one of the missinghead will be obsolete or |
|
1879 | 1805 | # unstable. So checking heads only is ok |
|
1880 | 1806 | for node in outgoing.missingheads: |
|
1881 |                         ctx = self[node] |

1807 | ctx = unfi[node] | |
|
1882 | 1808 | if ctx.obsolete(): |
|
1883 | 1809 | raise util.Abort(mso % ctx) |
|
1884 |                         elif ctx.unstable(): |

1885 |                             raise util.Abort(msu % ctx) |

1886 |                         elif ctx.bumped(): |

1887 |                             raise util.Abort(msb % ctx) |

1888 |                 discovery.checkheads(self, remote, outgoing, |

1810 | elif ctx.troubled(): | |
|
1811 | raise util.Abort(_(mst) | |
|
1812 | % (ctx.troubles()[0], | |
|
1813 | ctx)) | |
|
1814 | discovery.checkheads(unfi, remote, outgoing, | |
|
1889 | 1815 | remoteheads, newbranch, |
|
1890 | 1816 | bool(inc)) |
|
1891 | 1817 | |
@@ -1938,7 +1864,7 b' class localrepository(object):' | |||
|
1938 | 1864 | cheads = [node for node in revs if node in common] |
|
1939 | 1865 | # and |
|
1940 | 1866 | # * commonheads parents on missing |
|
1941 |                     revset = self.set('%ln and parents(roots(%ln))', |

1867 | revset = unfi.set('%ln and parents(roots(%ln))', | |
|
1942 | 1868 | outgoing.commonheads, |
|
1943 | 1869 | outgoing.missing) |
|
1944 | 1870 | cheads.extend(c.node() for c in revset) |
@@ -1961,7 +1887,7 b' class localrepository(object):' | |||
|
1961 | 1887 | # Get the list of all revs draft on remote by public here. |
|
1962 | 1888 | # XXX Beware that revset break if droots is not strictly |
|
1963 | 1889 | # XXX root we may want to ensure it is but it is costly |
|
1964 |                     outdated = self.set('heads((%ln::%ln) and public())', |

1890 | outdated = unfi.set('heads((%ln::%ln) and public())', | |
|
1965 | 1891 | droots, cheads) |
|
1966 | 1892 | for newremotehead in outdated: |
|
1967 | 1893 | r = remote.pushkey('phases', |
@@ -1992,12 +1918,12 b' class localrepository(object):' | |||
|
1992 | 1918 | self.ui.debug("checking for updated bookmarks\n") |
|
1993 | 1919 | rb = remote.listkeys('bookmarks') |
|
1994 | 1920 | for k in rb.keys(): |
|
1995 |                 if k in self._bookmarks: |

1921 | if k in unfi._bookmarks: | |
|
1996 | 1922 | nr, nl = rb[k], hex(self._bookmarks[k]) |
|
1997 |                     if nr in self: |

1998 |                         cr = self[nr] |

1999 |                         cl = self[nl] |

2000 |                         if bookmarks.validdest(self, cr, cl): |

1923 | if nr in unfi: | |
|
1924 | cr = unfi[nr] | |
|
1925 | cl = unfi[nl] | |
|
1926 | if bookmarks.validdest(unfi, cr, cl): | |
|
2001 | 1927 | r = remote.pushkey('bookmarks', k, nr, nl) |
|
2002 | 1928 | if r: |
|
2003 | 1929 | self.ui.status(_("updating bookmark %s\n") % k) |
@@ -2033,7 +1959,7 b' class localrepository(object):' | |||
|
2033 | 1959 | bases = [nullid] |
|
2034 | 1960 | csets, bases, heads = cl.nodesbetween(bases, heads) |
|
2035 | 1961 | # We assume that all ancestors of bases are known |
|
2036 |         common = set(cl.ancestors([cl.rev(n) for n in bases])) |

1962 | common = cl.ancestors([cl.rev(n) for n in bases]) | |
|
2037 | 1963 | return self._changegroupsubset(common, csets, heads, source) |
|
2038 | 1964 | |
|
2039 | 1965 | def getlocalbundle(self, source, outgoing): |
@@ -2059,8 +1985,8 b' class localrepository(object):' | |||
|
2059 | 1985 | """ |
|
2060 | 1986 | cl = self.changelog |
|
2061 | 1987 | if common: |
|
2062 |             nm = cl.nodemap |

2063 |             common = [n for n in common if n in nm] |

1988 | hasnode = cl.hasnode | |
|
1989 | common = [n for n in common if hasnode(n)] | |
|
2064 | 1990 | else: |
|
2065 | 1991 | common = [nullid] |
|
2066 | 1992 | if not heads: |
@@ -2068,6 +1994,7 b' class localrepository(object):' | |||
|
2068 | 1994 | return self.getlocalbundle(source, |
|
2069 | 1995 | discovery.outgoing(cl, common, heads)) |
|
2070 | 1996 | |
|
1997 | @unfilteredmethod | |
|
2071 | 1998 | def _changegroupsubset(self, commonrevs, csets, heads, source): |
|
2072 | 1999 | |
|
2073 | 2000 | cl = self.changelog |
@@ -2179,6 +2106,7 b' class localrepository(object):' | |||
|
2179 | 2106 | # to avoid a race we use changegroupsubset() (issue1320) |
|
2180 | 2107 | return self.changegroupsubset(basenodes, self.heads(), source) |
|
2181 | 2108 | |
|
2109 | @unfilteredmethod | |
|
2182 | 2110 | def _changegroup(self, nodes, source): |
|
2183 | 2111 | """Compute the changegroup of all nodes that we have that a recipient |
|
2184 | 2112 | doesn't. Return a chunkbuffer object whose read() method will return |
@@ -2272,6 +2200,7 b' class localrepository(object):' | |||
|
2272 | 2200 | |
|
2273 | 2201 | return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN') |
|
2274 | 2202 | |
|
2203 | @unfilteredmethod | |
|
2275 | 2204 | def addchangegroup(self, source, srctype, url, emptyok=False): |
|
2276 | 2205 | """Add the changegroup returned by source.read() to this repo. |
|
2277 | 2206 | srctype is a string like 'push', 'pull', or 'unbundle'. url is |
@@ -2382,6 +2311,9 b' class localrepository(object):' | |||
|
2382 | 2311 | n = fl.node(new) |
|
2383 | 2312 | if n in needs: |
|
2384 | 2313 | needs.remove(n) |
|
2314 | else: | |
|
2315 | raise util.Abort( | |
|
2316 | _("received spurious file revlog entry")) | |
|
2385 | 2317 | if not needs: |
|
2386 | 2318 | del needfiles[f] |
|
2387 | 2319 | self.ui.progress(_('files'), None) |
@@ -2410,7 +2342,7 b' class localrepository(object):' | |||
|
2410 | 2342 | self.ui.status(_("added %d changesets" |
|
2411 | 2343 | " with %d changes to %d files%s\n") |
|
2412 | 2344 | % (changesets, revisions, files, htext)) |
|
2413 | obsolete.clearobscaches(self) | |
|
2345 | self.invalidatevolatilesets() | |
|
2414 | 2346 | |
|
2415 | 2347 | if changesets > 0: |
|
2416 | 2348 | p = lambda: cl.writepending() and self.root or "" |
@@ -2444,7 +2376,11 b' class localrepository(object):' | |||
|
2444 | 2376 | tr.close() |
|
2445 | 2377 | |
|
2446 | 2378 | if changesets > 0: |
|
2447 | self.updatebranchcache() | |
|
2379 | if srctype != 'strip': | |
|
2380 |                 # During strip, branchcache is invalid but the coming call to |
|
2381 | # `destroyed` will repair it. | |
|
2382 |                 # In all other cases we can safely update the cache on disk. |
|
2383 | branchmap.updatecache(self.filtered('served')) | |
|
2448 | 2384 | def runhooks(): |
|
2449 | 2385 | # forcefully update the on-disk branch cache |
|
2450 | 2386 | self.ui.debug("updating the branch cache\n") |
@@ -2538,12 +2474,20 b' class localrepository(object):' | |||
|
2538 | 2474 | for bheads in rbranchmap.itervalues(): |
|
2539 | 2475 | rbheads.extend(bheads) |
|
2540 | 2476 | |
|
2541 | self.branchcache = rbranchmap | |
|
2542 | 2477 | if rbheads: |
|
2543 | 2478 | rtiprev = max((int(self.changelog.rev(node)) |
|
2544 | 2479 | for node in rbheads)) |
|
2545 |                     self._writebranchcache(self.branchcache, |

2546 |                                            self[rtiprev].node(), rtiprev) |

2480 | cache = branchmap.branchcache(rbranchmap, | |
|
2481 | self[rtiprev].node(), | |
|
2482 | rtiprev) | |
|
2483 | # Try to stick it as low as possible | |
|
2484 |                 # filters above "served" are unlikely to be fetched from a clone |
|
2485 | for candidate in ('base', 'immutable', 'served'): | |
|
2486 | rview = self.filtered(candidate) | |
|
2487 | if cache.validfor(rview): | |
|
2488 | self._branchcaches[candidate] = cache | |
|
2489 | cache.write(rview) | |
|
2490 | break | |
|
2547 | 2491 | self.invalidate() |
|
2548 | 2492 | return len(self.heads()) + 1 |
|
2549 | 2493 | finally: |
@@ -2607,7 +2551,7 b' class localrepository(object):' | |||
|
2607 | 2551 | fp.write(text) |
|
2608 | 2552 | finally: |
|
2609 | 2553 | fp.close() |
|
2610 | return self.pathto(fp.name[len(self.root)+1:]) | |
|
2554 | return self.pathto(fp.name[len(self.root) + 1:]) | |
|
2611 | 2555 | |
|
2612 | 2556 | # used to avoid circular references so destructors work |
|
2613 | 2557 | def aftertrans(files): |
@@ -117,15 +117,23 b' class manifest(revlog.revlog):' | |||
|
117 | 117 | # apply the changes collected during the bisect loop to our addlist |
|
118 | 118 | # return a delta suitable for addrevision |
|
119 | 119 | def addlistdelta(addlist, x): |
|
120 | # start from the bottom up | |
|
121 | # so changes to the offsets don't mess things up. | |
|
122 | for start, end, content in reversed(x): | |
|
120 | # for large addlist arrays, building a new array is cheaper | |
|
121 | # than repeatedly modifying the existing one | |
|
122 | currentposition = 0 | |
|
123 | newaddlist = array.array('c') | |
|
124 | ||
|
125 | for start, end, content in x: | |
|
126 | newaddlist += addlist[currentposition:start] | |
|
123 | 127 | if content: |
|
124 |                 addlist[start:end] = array.array('c', content) |

125 | else: | |
|
126 | del addlist[start:end] | |
|
127 | return "".join(struct.pack(">lll", start, end, len(content)) | |
|
128 | newaddlist += array.array('c', content) | |
|
129 | ||
|
130 | currentposition = end | |
|
131 | ||
|
132 | newaddlist += addlist[currentposition:] | |
|
133 | ||
|
134 | deltatext = "".join(struct.pack(">lll", start, end, len(content)) | |
|
128 | 135 | + content for start, end, content in x) |
|
136 | return deltatext, newaddlist | |
|
129 | 137 | |
|
130 | 138 | def checkforbidden(l): |
|
131 | 139 | for f in l: |
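
The deltatext built above packs each (start, end, content) triple as three big-endian 32-bit integers followed by the replacement bytes; a minimal sketch of the matching reader:

    import struct

    def unpackdelta(deltatext):
        # inverse of the struct.pack(">lll", ...) framing above
        pos, entries = 0, []
        while pos < len(deltatext):
            start, end, l = struct.unpack(">lll", deltatext[pos:pos + 12])
            entries.append((start, end, deltatext[pos + 12:pos + 12 + l]))
            pos += 12 + l
        return entries
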
@@ -194,7 +202,8 b' class manifest(revlog.revlog):' | |||
|
194 | 202 | if dstart is not None: |
|
195 | 203 | delta.append([dstart, dend, "".join(dline)]) |
|
196 | 204 | # apply the delta to the addlist, and get a delta for addrevision |
|
197 |         cachedelta = (self.rev(p1), addlistdelta(addlist, delta)) |

205 | deltatext, addlist = addlistdelta(addlist, delta) | |
|
206 | cachedelta = (self.rev(p1), deltatext) | |
|
198 | 207 | arraytext = addlist |
|
199 | 208 | text = util.buffer(arraytext) |
|
200 | 209 |
@@ -7,7 +7,7 b'' | |||
|
7 | 7 | |
|
8 | 8 | from i18n import _ |
|
9 | 9 | import bdiff, mpatch, util |
|
10 | import re, struct | |
|
10 | import re, struct, base85, zlib | |
|
11 | 11 | |
|
12 | 12 | def splitnewlines(text): |
|
13 | 13 | '''like str.splitlines, but only split on newlines.''' |
@@ -142,20 +142,7 b' def allblocks(text1, text2, opts=None, l' | |||
|
142 | 142 | yield s, type |
|
143 | 143 | yield s1, '=' |
|
144 | 144 | |
|
145 | def diffline(revs, a, b, opts): | |
|
146 | parts = ['diff'] | |
|
147 | if opts.git: | |
|
148 | parts.append('--git') | |
|
149 | if revs and not opts.git: | |
|
150 | parts.append(' '.join(["-r %s" % rev for rev in revs])) | |
|
151 | if opts.git: | |
|
152 | parts.append('a/%s' % a) | |
|
153 | parts.append('b/%s' % b) | |
|
154 | else: | |
|
155 | parts.append(a) | |
|
156 | return ' '.join(parts) + '\n' | |
|
157 | ||
|
158 | def unidiff(a, ad, b, bd, fn1, fn2, r=None, opts=defaultopts): | |
|
145 | def unidiff(a, ad, b, bd, fn1, fn2, opts=defaultopts): | |
|
159 | 146 | def datetag(date, fn=None): |
|
160 | 147 | if not opts.git and not opts.nodates: |
|
161 | 148 | return '\t%s\n' % date |
@@ -206,9 +193,6 b' def unidiff(a, ad, b, bd, fn1, fn2, r=No' | |||
|
206 | 193 | if l[ln][-1] != '\n': |
|
207 | 194 | l[ln] += "\n\ No newline at end of file\n" |
|
208 | 195 | |
|
209 | if r: | |
|
210 | l.insert(0, diffline(r, fn1, fn2, opts)) | |
|
211 | ||
|
212 | 196 | return "".join(l) |
|
213 | 197 | |
|
214 | 198 | # creates a headerless unified diff |
@@ -314,6 +298,41 b' def _unidiff(t1, t2, l1, l2, opts=defaul' | |||
|
314 | 298 | for x in yieldhunk(hunk): |
|
315 | 299 | yield x |
|
316 | 300 | |
|
301 | def b85diff(to, tn): | |
|
302 | '''print base85-encoded binary diff''' | |
|
303 | def fmtline(line): | |
|
304 | l = len(line) | |
|
305 | if l <= 26: | |
|
306 | l = chr(ord('A') + l - 1) | |
|
307 | else: | |
|
308 | l = chr(l - 26 + ord('a') - 1) | |
|
309 | return '%c%s\n' % (l, base85.b85encode(line, True)) | |
|
310 | ||
|
311 | def chunk(text, csize=52): | |
|
312 | l = len(text) | |
|
313 | i = 0 | |
|
314 | while i < l: | |
|
315 | yield text[i:i + csize] | |
|
316 | i += csize | |
|
317 | ||
|
318 | if to is None: | |
|
319 | to = '' | |
|
320 | if tn is None: | |
|
321 | tn = '' | |
|
322 | ||
|
323 | if to == tn: | |
|
324 | return '' | |
|
325 | ||
|
326 | # TODO: deltas | |
|
327 | ret = [] | |
|
328 | ret.append('GIT binary patch\n') | |
|
329 | ret.append('literal %s\n' % len(tn)) | |
|
330 | for l in chunk(zlib.compress(tn)): | |
|
331 | ret.append(fmtline(l)) | |
|
332 | ret.append('\n') | |
|
333 | ||
|
334 | return ''.join(ret) | |
|
335 | ||
|
317 | 336 | def patchtext(bin): |
|
318 | 337 | pos = 0 |
|
319 | 338 | t = [] |
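
fmtline() above encodes each chunk's length as one leading character ('A'-'Z' for 1-26 bytes, 'a'-'z' for 27-52), the framing git uses for base85 binary patches; a sketch of the inverse mapping:

    def linelen(c):
        # length character -> payload byte count
        if 'A' <= c <= 'Z':
            return ord(c) - ord('A') + 1
        return ord(c) - ord('a') + 27
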
@@ -7,7 +7,7 b'' | |||
|
7 | 7 | |
|
8 | 8 | from node import nullid, nullrev, hex, bin |
|
9 | 9 | from i18n import _ |
|
10 | import error, util, filemerge, copies, subrepo, scmutil |

10 | import error, util, filemerge, copies, subrepo | |
|
11 | 11 | import errno, os, shutil |
|
12 | 12 | |
|
13 | 13 | class mergestate(object): |
@@ -45,11 +45,11 b' class mergestate(object):' | |||
|
45 | 45 | f.write("\0".join([d] + v) + "\n") |
|
46 | 46 | f.close() |
|
47 | 47 | self._dirty = False |
|
48 |     def add(self, fcl, fco, fca, fd, flags): |

48 | def add(self, fcl, fco, fca, fd): | |
|
49 | 49 | hash = util.sha1(fcl.path()).hexdigest() |
|
50 | 50 | self._repo.opener.write("merge/" + hash, fcl.data()) |
|
51 | 51 | self._state[fd] = ['u', hash, fcl.path(), fca.path(), |
|
52 | hex(fca.filenode()), fco.path(), flags] | |
|
52 | hex(fca.filenode()), fco.path(), fcl.flags()] | |
|
53 | 53 | self._dirty = True |
|
54 | 54 | def __contains__(self, dfile): |
|
55 | 55 | return dfile in self._state |
@@ -67,12 +67,22 b' class mergestate(object):' | |||
|
67 | 67 | if self[dfile] == 'r': |
|
68 | 68 | return 0 |
|
69 | 69 | state, hash, lfile, afile, anode, ofile, flags = self._state[dfile] |
|
70 | fcd = wctx[dfile] | |
|
71 | fco = octx[ofile] | |
|
72 | fca = self._repo.filectx(afile, fileid=anode) | |
|
73 | # "premerge" x flags | |
|
74 | flo = fco.flags() | |
|
75 | fla = fca.flags() | |
|
76 | if 'x' in flags + flo + fla and 'l' not in flags + flo + fla: | |
|
77 | if fca.node() == nullid: | |
|
78 | self._repo.ui.warn(_('warning: cannot merge flags for %s\n') % | |
|
79 | afile) | |
|
80 | elif flags == fla: | |
|
81 | flags = flo | |
|
82 | # restore local | |
|
70 | 83 | f = self._repo.opener("merge/" + hash) |
|
71 | 84 | self._repo.wwrite(dfile, f.read(), flags) |
|
72 | 85 | f.close() |
|
73 | fcd = wctx[dfile] | |
|
74 | fco = octx[ofile] | |
|
75 | fca = self._repo.filectx(afile, fileid=anode) | |
|
76 | 86 | r = filemerge.filemerge(self._repo, self._local, lfile, fcd, fco, fca) |
|
77 | 87 | if r is None: |
|
78 | 88 | # no real conflict |
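
A flags-only sketch of the "premerge" rule added above: when just the executable bit is in play (no symlinks), a local side that still matches the ancestor adopts the other side's flags.

    def premergeflags(flags, flo, fla):
        # flags/flo/fla: local, other, ancestor flag strings
        if 'x' in flags + flo + fla and 'l' not in flags + flo + fla:
            if flags == fla:       # local did not change the flags
                return flo         # so the other side's change wins
        return flags
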
@@ -162,18 +172,18 b' def _forgetremoved(wctx, mctx, branchmer' | |||
|
162 | 172 | as removed. |
|
163 | 173 | """ |
|
164 | 174 | |
|
165 | action = [] | |
|
175 | actions = [] | |
|
166 | 176 | state = branchmerge and 'r' or 'f' |
|
167 | 177 | for f in wctx.deleted(): |
|
168 | 178 | if f not in mctx: |
|
169 | action.append((f, state)) | |
|
179 | actions.append((f, state)) | |
|
170 | 180 | |
|
171 | 181 | if not branchmerge: |
|
172 | 182 | for f in wctx.removed(): |
|
173 | 183 | if f not in mctx: |
|
174 | action.append((f, "f")) | |
|
184 | actions.append((f, "f")) | |
|
175 | 185 | |
|
176 | return action | |
|
186 | return actions | |
|
177 | 187 | |
|
178 | 188 | def manifestmerge(repo, p1, p2, pa, overwrite, partial): |
|
179 | 189 | """ |
@@ -183,44 +193,19 b' def manifestmerge(repo, p1, p2, pa, over' | |||
|
183 | 193 | partial = function to filter file lists |
|
184 | 194 | """ |
|
185 | 195 | |
|
186 | def fmerge(f, f2, fa): | |
|
187 | """merge flags""" | |
|
188 | a, m, n = ma.flags(fa), m1.flags(f), m2.flags(f2) | |
|
189 | if m == n: # flags agree | |
|
190 | return m # unchanged | |
|
191 | if m and n and not a: # flags set, don't agree, differ from parent | |
|
192 | r = repo.ui.promptchoice( | |
|
193 | _(" conflicting flags for %s\n" | |
|
194 | "(n)one, e(x)ec or sym(l)ink?") % f, | |
|
195 | (_("&None"), _("E&xec"), _("Sym&link")), 0) | |
|
196 | if r == 1: | |
|
197 | return "x" # Exec | |
|
198 | if r == 2: | |
|
199 | return "l" # Symlink | |
|
200 | return "" | |
|
201 | if m and m != a: # changed from a to m | |
|
202 | return m | |
|
203 | if n and n != a: # changed from a to n | |
|
204 | if (n == 'l' or a == 'l') and m1.get(f) != ma.get(f): | |
|
205 | # can't automatically merge symlink flag when there | |
|
206 | # are file-level conflicts here, let filemerge take | |
|
207 | # care of it | |
|
208 | return m | |
|
209 | return n | |
|
210 | return '' # flag was cleared | |
|
211 | ||
|
212 | 196 | def act(msg, m, f, *args): |
|
213 | 197 | repo.ui.debug(" %s: %s -> %s\n" % (f, msg, m)) |
|
214 | action.append((f, m) + args) | |
|
198 | actions.append((f, m) + args) | |
|
215 | 199 | |
|
216 | action, copy = [], {} | |
|
200 | actions, copy, movewithdir = [], {}, {} | |
|
217 | 201 | |
|
218 | 202 | if overwrite: |
|
219 | 203 | pa = p1 |
|
220 | 204 | elif pa == p2: # backwards |
|
221 | 205 | pa = p1.p1() |
|
222 | 206 | elif pa and repo.ui.configbool("merge", "followcopies", True): |
|
223 |         copy, diverge, renamedelete = copies.mergecopies(repo, p1, p2, pa) |

207 | ret = copies.mergecopies(repo, p1, p2, pa) | |
|
208 | copy, movewithdir, diverge, renamedelete = ret | |
|
224 | 209 | for of, fl in diverge.iteritems(): |
|
225 | 210 | act("divergent renames", "dr", of, fl) |
|
226 | 211 | for of, fl in renamedelete.iteritems(): |
@@ -233,40 +218,48 b' def manifestmerge(repo, p1, p2, pa, over' | |||
|
233 | 218 | |
|
234 | 219 | m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest() |
|
235 | 220 | copied = set(copy.values()) |
|
221 | copied.update(movewithdir.values()) | |
|
236 | 222 | |
|
237 | 223 | if '.hgsubstate' in m1: |
|
238 | 224 | # check whether sub state is modified |
|
239 | for s in p1.substate: | |
|
225 | for s in sorted(p1.substate): | |
|
240 | 226 | if p1.sub(s).dirty(): |
|
241 | 227 | m1['.hgsubstate'] += "+" |
|
242 | 228 | break |
|
243 | 229 | |
|
244 | 230 | # Compare manifests |
|
245 | for f, n in m1.iteritems(): | |
|
231 | for f, n in sorted(m1.iteritems()): | |
|
246 | 232 | if partial and not partial(f): |
|
247 | 233 | continue |
|
248 | 234 | if f in m2: |
|
249 | rflags = fmerge(f, f, f) | |
|
235 | n2 = m2[f] | |
|
236 | fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f) | |
|
237 | nol = 'l' not in fl1 + fl2 + fla | |
|
250 | 238 | a = ma.get(f, nullid) |
|
251 | if n == m2[f] or m2[f] == a: # same or local newer | |
|
252 | # is file locally modified or flags need changing? | |
|
253 | # dirstate flags may need to be made current | |
|
254 | if m1.flags(f) != rflags or n[20:]: | |
|
255 | act("update permissions", "e", f, rflags) | |
|
256 | elif n == a: # remote newer | |
|
257 |                 act("remote is newer", "g", f, rflags) |

258 |             else: # both changed something |

259 |                 act("versions differ", "m", f, f, f, rflags, False) |

239 | if n == n2 and fl1 == fl2: | |
|
240 | pass # same - keep local | |
|
241 | elif n2 == a and fl2 == fla: | |
|
242 | pass # remote unchanged - keep local | |
|
243 | elif n == a and fl1 == fla: # local unchanged - use remote | |
|
244 | if n == n2: # optimization: keep local content | |
|
245 | act("update permissions", "e", f, fl2) | |
|
246 | else: | |
|
247 | act("remote is newer", "g", f, fl2) | |
|
248 | elif nol and n2 == a: # remote only changed 'x' | |
|
249 | act("update permissions", "e", f, fl2) | |
|
250 | elif nol and n == a: # local only changed 'x' | |
|
251 | act("remote is newer", "g", f, fl) | |
|
252 | else: # both changed something | |
|
253 | act("versions differ", "m", f, f, f, False) | |
|
260 | 254 | elif f in copied: # files we'll deal with on m2 side |
|
261 | 255 | pass |
|
256 | elif f in movewithdir: # directory rename | |
|
257 | f2 = movewithdir[f] | |
|
258 | act("remote renamed directory to " + f2, "d", f, None, f2, | |
|
259 | m1.flags(f)) | |
|
262 | 260 | elif f in copy: |
|
263 | 261 | f2 = copy[f] |
|
264 | if f2 not in m2: # directory rename | |
|
265 | act("remote renamed directory to " + f2, "d", | |
|
266 | f, None, f2, m1.flags(f)) | |
|
267 | else: # case 2 A,B/B/B or case 4,21 A/B/B | |
|
268 | act("local copied/moved to " + f2, "m", | |
|
269 | f, f2, f, fmerge(f, f2, f2), False) | |
|
262 | act("local copied/moved to " + f2, "m", f, f2, f, False) | |
|
270 | 263 | elif f in ma: # clean, a different, no remote |
|
271 | 264 | if n != ma[f]: |
|
272 | 265 | if repo.ui.promptchoice( |
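
A minimal sketch of the flag-resolution order used by the new comparisons earlier in this hunk (flags only; the content checks are omitted):

    def pickflags(fl1, fl2, fla):
        # local fl1, remote fl2, ancestor fla
        if fl1 == fl2:
            return fl1         # both sides agree
        if fl2 == fla:
            return fl1         # remote unchanged - keep local
        if fl1 == fla:
            return fl2         # local unchanged - use remote
        return None            # both changed: needs a real merge
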
@@ -281,28 +274,28 b' def manifestmerge(repo, p1, p2, pa, over' | |||
|
281 | 274 | else: |
|
282 | 275 | act("other deleted", "r", f) |
|
283 | 276 | |
|
284 | for f, n in m2.iteritems(): | |
|
277 | for f, n in sorted(m2.iteritems()): | |
|
285 | 278 | if partial and not partial(f): |
|
286 | 279 | continue |
|
287 | 280 | if f in m1 or f in copied: # files already visited |
|
288 | 281 | continue |
|
289 | if f in copy: | |
|
282 | if f in movewithdir: | |
|
283 | f2 = movewithdir[f] | |
|
284 | act("local renamed directory to " + f2, "d", None, f, f2, | |
|
285 | m2.flags(f)) | |
|
286 | elif f in copy: | |
|
290 | 287 | f2 = copy[f] |
|
291 | if f2 not in m1: # directory rename | |
|
292 | act("local renamed directory to " + f2, "d", | |
|
293 | None, f, f2, m2.flags(f)) | |
|
294 | elif f2 in m2: # rename case 1, A/A,B/A | |
|
288 | if f2 in m2: | |
|
295 | 289 | act("remote copied to " + f, "m", |
|
296 | f2, f, f, fmerge(f2, f, f2), False) | |

297 | else: # case 3,20 A/B/A | |
|
290 | f2, f, f, False) | |
|
291 | else: | |
|
298 | 292 | act("remote moved to " + f, "m", |
|
299 | f2, f, f, fmerge(f2, f, f2), True) | |
|
293 | f2, f, f, True) | |
|
300 | 294 | elif f not in ma: |
|
301 | 295 | if (not overwrite |
|
302 | 296 | and _checkunknownfile(repo, p1, p2, f)): |
|
303 | rflags = fmerge(f, f, f) | |
|
304 | 297 | act("remote differs from untracked local", |
|
305 | "m", f, f, f, rflags, False) | |
|
298 | "m", f, f, f, False) | |
|
306 | 299 | else: |
|
307 | 300 | act("remote created", "g", f, m2.flags(f)) |
|
308 | 301 | elif n != ma[f]: |
@@ -312,12 +305,12 b' def manifestmerge(repo, p1, p2, pa, over' | |||
|
312 | 305 | (_("&Changed"), _("&Deleted")), 0) == 0: |
|
313 | 306 | act("prompt recreating", "g", f, m2.flags(f)) |
|
314 | 307 | |
|
315 | return action | |
|
308 | return actions | |
|
316 | 309 | |
|
317 | 310 | def actionkey(a): |
|
318 | return a[1] == 'r' and -1 or 0, a | |
|
311 | return a[1] == "r" and -1 or 0, a | |
|
319 | 312 | |
|
320 | def applyupdates(repo, action, wctx, mctx, actx, overwrite): | |
|
313 | def applyupdates(repo, actions, wctx, mctx, actx, overwrite): | |
|
321 | 314 | """apply the merge action list to the working directory |
|
322 | 315 | |
|
323 | 316 | wctx is the working copy context |
@@ -332,14 +325,14 b' def applyupdates(repo, action, wctx, mct' | |||
|
332 | 325 | ms = mergestate(repo) |
|
333 | 326 | ms.reset(wctx.p1().node()) |
|
334 | 327 | moves = [] |
|
335 | action.sort(key=actionkey) | |
|
328 | actions.sort(key=actionkey) | |
|
336 | 329 | |
|
337 | 330 | # prescan for merges |
|
338 | for a in action: | |
|
331 | for a in actions: | |
|
339 | 332 | f, m = a[:2] |
|
340 | if m == 'm': # merge | |

341 | f2, fd, flags, move = a[2:] | |
|
342 | if f == '.hgsubstate': # merged internally | |
|
333 | if m == "m": # merge | |
|
334 | f2, fd, move = a[2:] | |
|
335 | if fd == '.hgsubstate': # merged internally | |
|
343 | 336 | continue |
|
344 | 337 | repo.ui.debug("preserving %s for resolve of %s\n" % (f, fd)) |
|
345 | 338 | fcl = wctx[f] |
@@ -353,45 +346,42 b' def applyupdates(repo, action, wctx, mct' | |||
|
353 | 346 | fca = fcl.ancestor(fco, actx) |
|
354 | 347 | if not fca: |
|
355 | 348 | fca = repo.filectx(f, fileid=nullrev) |
|
356 | ms.add(fcl, fco, fca, fd, flags) | |
|
349 | ms.add(fcl, fco, fca, fd) | |
|
357 | 350 | if f != fd and move: |
|
358 | 351 | moves.append(f) |
|
359 | 352 | |
|
360 | audit = scmutil.pathauditor(repo.root) | |
|
353 | audit = repo.wopener.audit | |
|
361 | 354 | |
|
362 | 355 | # remove renamed files after safely stored |
|
363 | 356 | for f in moves: |
|
364 | 357 | if os.path.lexists(repo.wjoin(f)): |
|
365 | 358 | repo.ui.debug("removing %s\n" % f) |
|
366 | 359 | audit(f) |
|
367 | os.unlink(repo.wjoin(f)) | |
|
360 | util.unlinkpath(repo.wjoin(f)) | |
|
368 | 361 | |
|
369 | numupdates = len(action) | |
|
370 | for i, a in enumerate(action): | |
|
362 | numupdates = len(actions) | |
|
363 | for i, a in enumerate(actions): | |
|
371 | 364 | f, m = a[:2] |
|
372 | 365 | repo.ui.progress(_('updating'), i + 1, item=f, total=numupdates, |
|
373 | 366 | unit=_('files')) |
|
374 | if f and f[0] == "/": | |
|
375 | continue | |
|
376 | 367 | if m == "r": # remove |
|
377 | 368 | repo.ui.note(_("removing %s\n") % f) |
|
378 | 369 | audit(f) |
|
379 | 370 | if f == '.hgsubstate': # subrepo states need updating |
|
380 | 371 | subrepo.submerge(repo, wctx, mctx, wctx, overwrite) |
|
381 | 372 | try: |
|
382 | util.unlinkpath(repo.wjoin(f)) | |
|
373 | util.unlinkpath(repo.wjoin(f), ignoremissing=True) | |
|
383 | 374 | except OSError, inst: |
|
384 | if inst.errno != errno.ENOENT: | |
|
385 | repo.ui.warn(_("update failed to remove %s: %s!\n") % | |
|
386 | (f, inst.strerror)) | |
|
375 | repo.ui.warn(_("update failed to remove %s: %s!\n") % | |
|
376 | (f, inst.strerror)) | |
|
387 | 377 | removed += 1 |
|
388 | 378 | elif m == "m": # merge |
|
389 | if f == '.hgsubstate': # subrepo states need updating | |
|
379 | if fd == '.hgsubstate': # subrepo states need updating | |
|
390 | 380 | subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx), |
|
391 | 381 | overwrite) |
|
392 | 382 | continue |
|
393 | f2, fd, flags, move = a[2:] | |

394 | repo.wopener.audit(fd) | |
|
383 | f2, fd, move = a[2:] | |
|
384 | audit(fd) | |
|
395 | 385 | r = ms.resolve(fd, wctx, mctx) |
|
396 | 386 | if r is not None and r > 0: |
|
397 | 387 | unresolved += 1 |
@@ -400,17 +390,10 b' def applyupdates(repo, action, wctx, mct' | |||
|
400 | 390 | updated += 1 |
|
401 | 391 | else: |
|
402 | 392 | merged += 1 |
|
403 | if (move and repo.dirstate.normalize(fd) != f | |
|
404 | and os.path.lexists(repo.wjoin(f))): | |
|
405 | repo.ui.debug("removing %s\n" % f) | |
|
406 | audit(f) | |
|
407 | os.unlink(repo.wjoin(f)) | |
|
408 | 393 | elif m == "g": # get |
|
409 | 394 | flags = a[2] |
|
410 | 395 | repo.ui.note(_("getting %s\n") % f) |
|
411 | t = mctx.filectx(f).data() | |
|
412 | repo.wwrite(f, t, flags) | |
|
413 | t = None | |
|
396 | repo.wwrite(f, mctx.filectx(f).data(), flags) | |
|
414 | 397 | updated += 1 |
|
415 | 398 | if f == '.hgsubstate': # subrepo states need updating |
|
416 | 399 | subrepo.submerge(repo, wctx, mctx, wctx, overwrite) |
@@ -419,13 +402,11 b' def applyupdates(repo, action, wctx, mct' | |||
|
419 | 402 | if f: |
|
420 | 403 | repo.ui.note(_("moving %s to %s\n") % (f, fd)) |
|
421 | 404 | audit(f) |
|
422 | t = wctx.filectx(f).data() | |
|
423 | repo.wwrite(fd, t, flags) | |
|
405 | repo.wwrite(fd, wctx.filectx(f).data(), flags) | |
|
424 | 406 | util.unlinkpath(repo.wjoin(f)) |
|
425 | 407 | if f2: |
|
426 | 408 | repo.ui.note(_("getting %s to %s\n") % (f2, fd)) |
|
427 | t = mctx.filectx(f2).data() | |
|
428 | repo.wwrite(fd, t, flags) | |
|
409 | repo.wwrite(fd, mctx.filectx(f2).data(), flags) | |
|
429 | 410 | updated += 1 |
|
430 | 411 | elif m == "dr": # divergent renames |
|
431 | 412 | fl = a[2] |
@@ -441,17 +422,39 b' def applyupdates(repo, action, wctx, mct' | |||
|
441 | 422 | repo.ui.warn(" %s\n" % nf) |
|
442 | 423 | elif m == "e": # exec |
|
443 | 424 | flags = a[2] |
|
444 | repo.wopener.audit(f) | |
|
425 | audit(f) | |
|
445 | 426 | util.setflags(repo.wjoin(f), 'l' in flags, 'x' in flags) |
|
427 | updated += 1 | |
|
446 | 428 | ms.commit() |
|
447 | 429 | repo.ui.progress(_('updating'), None, total=numupdates, unit=_('files')) |
|
448 | 430 | |
|
449 | 431 | return updated, merged, removed, unresolved |
|
450 | 432 | |
|
451 | def recordupdates(repo, action, branchmerge): | |
|
433 | def calculateupdates(repo, tctx, mctx, ancestor, branchmerge, force, partial): | |
|
434 | "Calculate the actions needed to merge mctx into tctx" | |
|
435 | actions = [] | |
|
436 | folding = not util.checkcase(repo.path) | |
|
437 | if folding: | |
|
438 | # collision check is not needed for clean update | |
|
439 | if (not branchmerge and | |
|
440 | (force or not tctx.dirty(missing=True, branch=False))): | |
|
441 | _checkcollision(mctx, None) | |
|
442 | else: | |
|
443 | _checkcollision(mctx, (tctx, ancestor)) | |
|
444 | if not force: | |
|
445 | _checkunknown(repo, tctx, mctx) | |
|
446 | if tctx.rev() is None: | |
|
447 | actions += _forgetremoved(tctx, mctx, branchmerge) | |
|
448 | actions += manifestmerge(repo, tctx, mctx, | |
|
449 | ancestor, | |
|
450 | force and not branchmerge, | |
|
451 | partial) | |
|
452 | return actions | |
|
453 | ||
|
454 | def recordupdates(repo, actions, branchmerge): | |
|
452 | 455 | "record merge actions to the dirstate" |
|
453 | 456 | |
|
454 | for a in action: | |
|
457 | for a in actions: | |
|
455 | 458 | f, m = a[:2] |
|
456 | 459 | if m == "r": # remove |
|
457 | 460 | if branchmerge: |
@@ -471,7 +474,7 b' def recordupdates(repo, action, branchme' | |||
|
471 | 474 | else: |
|
472 | 475 | repo.dirstate.normal(f) |
|
473 | 476 | elif m == "m": # merge |
|
474 | f2, fd, flags, move = a[2:] | |
|
477 | f2, fd, move = a[2:] | |
|
475 | 478 | if branchmerge: |
|
476 | 479 | # We've done a branch merge, mark this file as merged |
|
477 | 480 | # so that we properly record the merger later |
@@ -590,7 +593,7 b' def update(repo, node, branchmerge, forc' | |||
|
590 | 593 | if not force and (wc.files() or wc.deleted()): |
|
591 | 594 | raise util.Abort(_("outstanding uncommitted changes"), |
|
592 | 595 | hint=_("use 'hg status' to list changes")) |
|
593 | for s in wc.substate: | |
|
596 | for s in sorted(wc.substate): | |
|
594 | 597 | if wc.sub(s).dirty(): |
|
595 | 598 | raise util.Abort(_("outstanding uncommitted changes in " |
|
596 | 599 | "subrepository '%s'") % s) |
@@ -609,19 +612,8 b' def update(repo, node, branchmerge, forc' | |||
|
609 | 612 | pa = p1 |
|
610 | 613 | |
|
611 | 614 | ### calculate phase |
|
612 | action = [] | |
|
613 | folding = not util.checkcase(repo.path) | |
|
614 | if folding: | |
|
615 | # collision check is not needed for clean update | |
|
616 | if (not branchmerge and | |
|
617 | (force or not wc.dirty(missing=True, branch=False))): | |
|
618 | _checkcollision(p2, None) | |
|
619 | else: | |
|
620 | _checkcollision(p2, (wc, pa)) | |
|
621 | if not force: | |
|
622 | _checkunknown(repo, wc, p2) | |
|
623 | action += _forgetremoved(wc, p2, branchmerge) | |
|
624 | action += manifestmerge(repo, wc, p2, pa, overwrite, partial) | |
|
615 | actions = calculateupdates(repo, wc, p2, pa, | |
|
616 | branchmerge, force, partial) | |
|
625 | 617 | |
|
626 | 618 | ### apply phase |
|
627 | 619 | if not branchmerge: # just jump to the new rev |
@@ -629,11 +621,11 b' def update(repo, node, branchmerge, forc' | |||
|
629 | 621 | if not partial: |
|
630 | 622 | repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2) |
|
631 | 623 | |
|
632 | stats = applyupdates(repo, action, wc, p2, pa, overwrite) | |
|
624 | stats = applyupdates(repo, actions, wc, p2, pa, overwrite) | |
|
633 | 625 | |
|
634 | 626 | if not partial: |
|
635 | 627 | repo.setparents(fp1, fp2) |
|
636 | recordupdates(repo, action, branchmerge) | |
|
628 | recordupdates(repo, actions, branchmerge) | |
|
637 | 629 | if not branchmerge: |
|
638 | 630 | repo.dirstate.setbranch(p2.branch()) |
|
639 | 631 | finally: |
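
The rewritten comparison above drops the old fmerge() flag-merging helper in favor of direct comparisons of (node, flags) pairs against the ancestor manifest. A minimal sketch of that decision table as a standalone function (a hypothetical helper, not part of the change itself); "e" updates permissions, "g" takes the remote version, "m" merges:

    def mergedecision(n1, fl1, n2, fl2, na, fla):
        """Pick an action for a file present in both manifests.

        n*/fl* are the file node and flags in the local (1), remote (2)
        and ancestor (a) manifests."""
        nol = 'l' not in fl1 + fl2 + fla       # no symlink in any version
        if n1 == n2 and fl1 == fl2:
            return None                        # same - keep local
        if n2 == na and fl2 == fla:
            return None                        # remote unchanged - keep local
        if n1 == na and fl1 == fla:            # local unchanged - use remote
            return n1 == n2 and 'e' or 'g'     # equal content: flags only
        if nol and n2 == na:
            return 'e'                         # remote only changed 'x'
        if nol and n1 == na:
            return 'g'                         # local only changed 'x'
        return 'm'                             # both changed something
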
@@ -402,6 +402,200 b' def allsuccessors(obsstore, nodes, ignor' | |||
|
402 | 402 | seen.add(suc) |
|
403 | 403 | remaining.add(suc) |
|
404 | 404 | |
|
405 | def successorssets(repo, initialnode, cache=None): | |
|
406 | """Return all set of successors of initial nodes | |
|
407 | ||
|
408 | Successors set of changeset A are a group of revision that succeed A. It | |
|
409 | succeed A as a consistent whole, each revision being only partial | |
|
410 | replacement. Successors set contains non-obsolete changeset only. | |
|
411 | ||
|
412 | In most cases a changeset A has zero (changeset pruned) or a single | |

413 | successors set that contains a single successor (changeset A replaced by | |

414 | A'). | |

415 | ||

416 | When a changeset is split, the result is a successors set containing more | |

417 | than a single element. Divergent rewriting will result in multiple | |

418 | successors sets. | |
|
419 | ||
|
420 | They are returned as a list of tuples containing all valid successors sets. | |
|
421 | ||
|
422 | Final successors unknown locally are considered plain prunes (obsoleted | |
|
423 | without successors). | |
|
424 | ||
|
425 | The optional `cache` parameter is a dictionary that may contain | |

426 | precomputed successors sets. It is meant to reuse the computation of | |

427 | previous calls to `successorssets` when multiple calls are made at the same | |

428 | time. The cache dictionary is updated in place. The caller is responsible | |

429 | for its life span. Code that makes multiple calls to `successorssets` | |

430 | *must* use this cache mechanism or suffer terrible performance.""" | |
|
431 | ||
|
432 | succmarkers = repo.obsstore.successors | |
|
433 | ||
|
434 | # Stack of nodes we search successors sets for | |
|
435 | toproceed = [initialnode] | |
|
436 | # set version of above list for fast loop detection | |
|
437 | # element added to "toproceed" must be added here | |
|
438 | stackedset = set(toproceed) | |
|
439 | if cache is None: | |
|
440 | cache = {} | |
|
441 | ||
|
442 | # This while loop is the flattened version of a recursive search for | |
|
443 | # successors sets | |
|
444 | # | |
|
445 | # def successorssets(x): | |
|
446 | # successors = directsuccessors(x) | |
|
447 | # ss = [[]] | |
|
448 | # for succ in directsuccessors(x): | |
|
449 | # # product as in itertools cartesian product | |
|
450 | # ss = product(ss, successorssets(succ)) | |
|
451 | # return ss | |
|
452 | # | |
|
453 | # But we cannot use plain recursive calls here: | |
|
454 | # - that would blow the python call stack | |
|
455 | # - obsolescence markers may have cycles, we need to handle them. | |
|
456 | # | |
|
457 | # The `toproceed` list acts as our call stack. Every node whose | |

458 | # successors sets we search is stacked there. | |
|
459 | # | |
|
460 | # The `stackedset` is a set version of this stack used to check if a | |

461 | # node is already stacked. This check is used to detect cycles and | |

462 | # prevent infinite loops. | |
|
463 | # | |
|
464 | # The successors sets of all nodes are stored in the `cache` dictionary. | |
|
465 | # | |
|
466 | # After this while loop ends we use the cache to return the successors sets | |
|
467 | # for the node requested by the caller. | |
|
468 | while toproceed: | |
|
469 | # Every iteration tries to compute the successors sets of the topmost | |
|
470 | # node of the stack: CURRENT. | |
|
471 | # | |
|
472 | # There are four possible outcomes: | |
|
473 | # | |
|
474 | # 1) We already know the successors sets of CURRENT: | |
|
475 | # -> mission accomplished, pop it from the stack. | |
|
476 | # 2) Node is not obsolete: | |
|
477 | # -> the node is its own successors sets. Add it to the cache. | |
|
478 | # 3) We do not know successors set of direct successors of CURRENT: | |
|
479 | # -> We add those successors to the stack. | |
|
480 | # 4) We know successors sets of all direct successors of CURRENT: | |
|
481 | # -> We can compute CURRENT successors set and add it to the | |
|
482 | # cache. | |
|
483 | # | |
|
484 | current = toproceed[-1] | |
|
485 | if current in cache: | |
|
486 | # case (1): We already know the successors sets | |
|
487 | stackedset.remove(toproceed.pop()) | |
|
488 | elif current not in succmarkers: | |
|
489 | # case (2): The node is not obsolete. | |
|
490 | if current in repo: | |
|
491 | # We have a valid last successors. | |
|
492 | cache[current] = [(current,)] | |
|
493 | else: | |
|
494 | # Final obsolete version is unknown locally. | |
|
495 | # Do not count that as a valid successors | |
|
496 | cache[current] = [] | |
|
497 | else: | |
|
498 | # cases (3) and (4) | |
|
499 | # | |
|
500 | # We proceed in two phases. Phase 1 aims to distinguish case (3) | |
|
501 | # from case (4): | |
|
502 | # | |
|
503 | # For each direct successors of CURRENT, we check whether its | |
|
504 | # successors sets are known. If they are not, we stack the | |
|
505 | # unknown node and proceed to the next iteration of the while | |
|
506 | # loop. (case 3) | |
|
507 | # | |
|
508 | # During this step, we may detect obsolescence cycles: a node | |
|
509 | # with unknown successors sets but already in the call stack. | |
|
510 | # In such a situation, we arbitrarily set the successors sets of | |
|
511 | # the node to nothing (node pruned) to break the cycle. | |
|
512 | # | |
|
513 | # If no break was encountered we proceed to phase 2. | |
|
514 | # | |
|
515 | # Phase 2 computes successors sets of CURRENT (case 4); see details | |
|
516 | # in phase 2 itself. | |
|
517 | # | |
|
518 | # Note the two levels of iteration in each phase. | |
|
519 | # - The first one handles obsolescence markers using CURRENT as | |
|
520 | # precursor (successors markers of CURRENT). | |
|
521 | # | |
|
522 | # Having multiple entries here means divergence. | |
|
523 | # | |
|
524 | # - The second one handles successors defined in each marker. | |
|
525 | # | |
|
526 | # Having none means a pruned node, multiple successors means a split, | |

527 | # and a single successor is a standard replacement. | |
|
528 | # | |
|
529 | for mark in sorted(succmarkers[current]): | |
|
530 | for suc in mark[1]: | |
|
531 | if suc not in cache: | |
|
532 | if suc in stackedset: | |
|
533 | # cycle breaking | |
|
534 | cache[suc] = [] | |
|
535 | else: | |
|
536 | # case (3) If we have not computed successors sets | |
|
537 | # of one of those successors we add it to the | |
|
538 | # `toproceed` stack and stop all work for this | |
|
539 | # iteration. | |
|
540 | toproceed.append(suc) | |
|
541 | stackedset.add(suc) | |
|
542 | break | |
|
543 | else: | |
|
544 | continue | |
|
545 | break | |
|
546 | else: | |
|
547 | # case (4): we know all successors sets of all direct | |
|
548 | # successors | |
|
549 | # | |
|
550 | # Successors set contributed by each marker depends on the | |
|
551 | # successors sets of all its "successors" node. | |
|
552 | # | |
|
553 | # Each different marker is a divergence in the obsolescence | |
|
554 | # history. It contributes successors sets distinct from other | |
|
555 | # markers. | |
|
556 | # | |
|
557 | # Within a marker, a successor may have divergent successors | |
|
558 | # sets. In such a case, the marker will contribute multiple | |
|
559 | # divergent successors sets. If multiple successors have | |
|
560 | # divergent successors sets, a cartesian product is used. | |
|
561 | # | |
|
562 | # At the end we post-process successors sets to remove | |
|
563 | # duplicated entries and successors sets that are strict subsets of | |
|
564 | # another one. | |
|
565 | succssets = [] | |
|
566 | for mark in sorted(succmarkers[current]): | |
|
567 | # successors sets contributed by this marker | |
|
568 | markss = [[]] | |
|
569 | for suc in mark[1]: | |
|
570 | # cartesian product with previous successors | |
|
571 | productresult = [] | |
|
572 | for prefix in markss: | |
|
573 | for suffix in cache[suc]: | |
|
574 | newss = list(prefix) | |
|
575 | for part in suffix: | |
|
576 | # do not duplicate entries in a successors set; | |

577 | # the first entry wins. | |
|
578 | if part not in newss: | |
|
579 | newss.append(part) | |
|
580 | productresult.append(newss) | |
|
581 | markss = productresult | |
|
582 | succssets.extend(markss) | |
|
583 | # remove duplicated and subset | |
|
584 | seen = [] | |
|
585 | final = [] | |
|
586 | candidate = sorted(((set(s), s) for s in succssets if s), | |
|
587 | key=lambda x: len(x[1]), reverse=True) | |
|
588 | for setversion, listversion in candidate: | |
|
589 | for seenset in seen: | |
|
590 | if setversion.issubset(seenset): | |
|
591 | break | |
|
592 | else: | |
|
593 | final.append(listversion) | |
|
594 | seen.append(setversion) | |
|
595 | final.reverse() # put small successors set first | |
|
596 | cache[current] = final | |
|
597 | return cache[initialnode] | |
|
598 | ||
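
The while loop above is easier to verify in isolation. Below is a hedged sketch of the same flattened recursion over a plain dict (hypothetical inputs: `markers` maps a node to a list of successor tuples, one tuple per marker; `alive` is the set of nodes known locally and not obsolete); the subset/duplicate pruning of the final pass is omitted for brevity:

    def successorssets_sketch(node, markers, alive):
        cache = {}
        toproceed = [node]                    # explicit call stack
        stacked = set(toproceed)              # fast cycle detection
        while toproceed:
            current = toproceed[-1]
            if current in cache:              # case (1): already known
                stacked.remove(toproceed.pop())
                continue
            if current not in markers:        # case (2): not obsolete
                cache[current] = current in alive and [(current,)] or []
                continue
            succs = set(s for sucs in markers[current] for s in sucs)
            for s in succs & stacked:         # obsolescence cycle:
                cache[s] = []                 # treat that node as pruned
            unknown = [s for s in succs
                       if s not in cache and s not in stacked]
            if unknown:                       # case (3): recurse first
                toproceed.extend(unknown)
                stacked.update(unknown)
                continue
            result = []                       # case (4): combine markers
            for sucs in markers[current]:     # each marker = one divergence
                markss = [[]]
                for s in sucs:                # cartesian product
                    markss = [pre + [p for p in suf if p not in pre]
                              for pre in markss for suf in cache[s]]
                result.extend(tuple(ss) for ss in markss if ss)
            cache[current] = result
        return cache[node]

For example, with markers = {'A': [('B',), ('C',)]} and alive = {'B', 'C'}, the sketch returns [('B',), ('C',)]: two competing successors sets, i.e. divergence.
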
|
405 | 599 | def _knownrevs(repo, nodes): |
|
406 | 600 | """yield revision numbers of known nodes passed in parameters |
|
407 | 601 | |
@@ -426,6 +620,7 b' def getrevs(repo, name):' | |||
|
426 | 620 | """Return the set of revision that belong to the <name> set |
|
427 | 621 | |
|
428 | 622 | Such access may compute the set and cache it for future use""" |
|
623 | repo = repo.unfiltered() | |
|
429 | 624 | if not repo.obsstore: |
|
430 | 625 | return () |
|
431 | 626 | if name not in repo.obsstore.caches: |
@@ -454,27 +649,35 b' def clearobscaches(repo):' | |||
|
454 | 649 | def _computeobsoleteset(repo): |
|
455 | 650 | """the set of obsolete revisions""" |
|
456 | 651 | obs = set() |
|
457 | nm = repo.changelog.nodemap | |
|
652 | getrev = repo.changelog.nodemap.get | |
|
653 | getphase = repo._phasecache.phase | |
|
458 | 654 | for node in repo.obsstore.successors: |
|
459 | rev = nm.get(node) | |
|
460 | if rev is not None: | |
|
655 | rev = getrev(node) | |
|
656 | if rev is not None and getphase(repo, rev): | |
|
461 | 657 | obs.add(rev) |
|
462 | return set(repo.revs('%ld - public()', obs)) | |
|
658 | return obs | |
|
463 | 659 | |
|
464 | 660 | @cachefor('unstable') |
|
465 | 661 | def _computeunstableset(repo): |
|
466 | 662 | """the set of non obsolete revisions with obsolete parents""" |
|
467 | return set(repo.revs('(obsolete()::) - obsolete()')) | |
|
663 | # revset is not efficient enough here | |
|
664 | # we do (obsolete()::) - obsolete() by hand | |
|
665 | obs = getrevs(repo, 'obsolete') | |
|
666 | if not obs: | |
|
667 | return set() | |
|
668 | cl = repo.changelog | |
|
669 | return set(r for r in cl.descendants(obs) if r not in obs) | |
|
468 | 670 | |
|
469 | 671 | @cachefor('suspended') |
|
470 | 672 | def _computesuspendedset(repo): |
|
471 | 673 | """the set of obsolete parents with non obsolete descendants""" |
|
472 | return set(repo.revs('obsolete() and obsolete()::unstable()')) | |
|
674 | suspended = repo.changelog.ancestors(getrevs(repo, 'unstable')) | |
|
675 | return set(r for r in getrevs(repo, 'obsolete') if r in suspended) | |
|
473 | 676 | |
|
474 | 677 | @cachefor('extinct') |
|
475 | 678 | def _computeextinctset(repo): |
|
476 | 679 | """the set of obsolete parents without non obsolete descendants""" |
|
477 | return set(repo.revs('obsolete() - obsolete()::unstable()')) | |
|
680 | return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended') | |
|
478 | 681 | |
|
479 | 682 | |
|
480 | 683 | @cachefor('bumped') |
@@ -489,6 +692,28 b' def _computebumpedset(repo):' | |||
|
489 | 692 | query = '%ld - obsolete() - public()' |
|
490 | 693 | return set(repo.revs(query, _knownrevs(repo, successors))) |
|
491 | 694 | |
|
695 | @cachefor('divergent') | |
|
696 | def _computedivergentset(repo): | |
|
697 | """the set of rev that compete to be the final successors of some revision. | |
|
698 | """ | |
|
699 | divergent = set() | |
|
700 | obsstore = repo.obsstore | |
|
701 | newermap = {} | |
|
702 | for ctx in repo.set('(not public()) - obsolete()'): | |
|
703 | mark = obsstore.precursors.get(ctx.node(), ()) | |
|
704 | toprocess = set(mark) | |
|
705 | while toprocess: | |
|
706 | prec = toprocess.pop()[0] | |
|
707 | if prec not in newermap: | |
|
708 | successorssets(repo, prec, newermap) | |
|
709 | newer = [n for n in newermap[prec] if n] | |
|
710 | if len(newer) > 1: | |
|
711 | divergent.add(ctx.rev()) | |
|
712 | break | |
|
713 | toprocess.update(obsstore.precursors.get(prec, ())) | |
|
714 | return divergent | |
|
715 | ||
|
716 | ||
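
_computedivergentset above leans on exactly the cache contract the docstring insists on: one `newermap` dictionary is shared across every successorssets() call while precursor markers are walked backwards. A hedged sketch of the divergence test itself, reusing successorssets_sketch and the toy marker data from the sketch above (hypothetical values, not the repository API; the real loop also walks transitive precursors, this checks direct precursors only):

    markers = {'X': [('Y',), ('Z',)]}        # X rewritten twice, independently
    alive = {'Y', 'Z'}
    precursors = {'Y': ['X'], 'Z': ['X']}    # reverse index of markers

    newermap = {}                            # shared cache, per the docstring

    def isdivergent(node):
        for prec in precursors.get(node, ()):
            if prec not in newermap:
                newermap[prec] = successorssets_sketch(prec, markers, alive)
            if len([ss for ss in newermap[prec] if ss]) > 1:
                return True                  # a precursor has competing sets
        return False

    assert isdivergent('Y') and isdivergent('Z')
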
|
492 | 717 | def createmarkers(repo, relations, flag=0, metadata=None): |
|
493 | 718 | """Add obsolete markers between changesets in a repo |
|
494 | 719 | |
@@ -521,6 +746,7 b' def createmarkers(repo, relations, flag=' | |||
|
521 | 746 | if nprec in nsucs: |
|
522 | 747 | raise util.Abort("changeset %s cannot obsolete itself" % prec) |
|
523 | 748 | repo.obsstore.create(tr, nprec, nsucs, flag, metadata) |
|
749 | repo.filteredrevcache.clear() | |
|
524 | 750 | tr.close() |
|
525 | 751 | finally: |
|
526 | 752 | tr.release() |
@@ -276,6 +276,16 b' int entkind(struct dirent *ent)' | |||
|
276 | 276 | return -1; |
|
277 | 277 | } |
|
278 | 278 | |
|
279 | static PyObject *makestat(const struct stat *st) | |
|
280 | { | |
|
281 | PyObject *stat; | |
|
282 | ||
|
283 | stat = PyObject_CallObject((PyObject *)&listdir_stat_type, NULL); | |
|
284 | if (stat) | |
|
285 | memcpy(&((struct listdir_stat *)stat)->st, st, sizeof(*st)); | |
|
286 | return stat; | |
|
287 | } | |
|
288 | ||
|
279 | 289 | static PyObject *_listdir(char *path, int pathlen, int keepstat, char *skip) |
|
280 | 290 | { |
|
281 | 291 | PyObject *list, *elem, *stat, *ret = NULL; |
@@ -351,10 +361,9 b' static PyObject *_listdir(char *path, in' | |||
|
351 | 361 | } |
|
352 | 362 | |
|
353 | 363 | if (keepstat) { |
|
354 | stat = PyObject_CallObject((PyObject *)&listdir_stat_type, NULL); | |
|
364 | stat = makestat(&st); | |
|
355 | 365 | if (!stat) |
|
356 | 366 | goto error; |
|
357 | memcpy(&((struct listdir_stat *)stat)->st, &st, sizeof(st)); | |
|
358 | 367 | elem = Py_BuildValue("siN", ent->d_name, kind, stat); |
|
359 | 368 | } else |
|
360 | 369 | elem = Py_BuildValue("si", ent->d_name, kind); |
@@ -380,6 +389,55 b' error_value:' | |||
|
380 | 389 | return ret; |
|
381 | 390 | } |
|
382 | 391 | |
|
392 | static PyObject *statfiles(PyObject *self, PyObject *args) | |
|
393 | { | |
|
394 | PyObject *names, *stats; | |
|
395 | Py_ssize_t i, count; | |
|
396 | ||
|
397 | if (!PyArg_ParseTuple(args, "O:statfiles", &names)) | |
|
398 | return NULL; | |
|
399 | ||
|
400 | count = PySequence_Length(names); | |
|
401 | if (count == -1) { | |
|
402 | PyErr_SetString(PyExc_TypeError, "not a sequence"); | |
|
403 | return NULL; | |
|
404 | } | |
|
405 | ||
|
406 | stats = PyList_New(count); | |
|
407 | if (stats == NULL) | |
|
408 | return NULL; | |
|
409 | ||
|
410 | for (i = 0; i < count; i++) { | |
|
411 | PyObject *stat; | |
|
412 | struct stat st; | |
|
413 | int ret, kind; | |
|
414 | char *path; | |
|
415 | ||
|
416 | path = PyString_AsString(PySequence_GetItem(names, i)); | |
|
417 | if (path == NULL) { | |
|
418 | PyErr_SetString(PyExc_TypeError, "not a string"); | |
|
419 | goto bail; | |
|
420 | } | |
|
421 | ret = lstat(path, &st); | |
|
422 | kind = st.st_mode & S_IFMT; | |
|
423 | if (ret != -1 && (kind == S_IFREG || kind == S_IFLNK)) { | |
|
424 | stat = makestat(&st); | |
|
425 | if (stat == NULL) | |
|
426 | goto bail; | |
|
427 | PyList_SET_ITEM(stats, i, stat); | |
|
428 | } else { | |
|
429 | Py_INCREF(Py_None); | |
|
430 | PyList_SET_ITEM(stats, i, Py_None); | |
|
431 | } | |
|
432 | } | |
|
433 | ||
|
434 | return stats; | |
|
435 | ||
|
436 | bail: | |
|
437 | Py_DECREF(stats); | |
|
438 | return NULL; | |
|
439 | } | |
|
440 | ||
|
383 | 441 | #endif /* ndef _WIN32 */ |
|
384 | 442 | |
|
385 | 443 | static PyObject *listdir(PyObject *self, PyObject *args, PyObject *kwargs) |
@@ -544,6 +602,10 b' static PyMethodDef methods[] = {' | |||
|
544 | 602 | {"posixfile", (PyCFunction)posixfile, METH_VARARGS | METH_KEYWORDS, |
|
545 | 603 | "Open a file with POSIX-like semantics.\n" |
|
546 | 604 | "On error, this function may raise either a WindowsError or an IOError."}, |
|
605 | #else | |
|
606 | {"statfiles", (PyCFunction)statfiles, METH_VARARGS | METH_KEYWORDS, | |
|
607 | "stat a series of files or symlinks\n" | |
|
608 | "Returns None for non-existent entries and entries of other types.\n"}, | |
|
547 | 609 | #endif |
|
548 | 610 | #ifdef __APPLE__ |
|
549 | 611 | { |
@@ -1508,6 +1508,7 b' static char parsers_doc[] = "Efficient c' | |||
|
1508 | 1508 | |
|
1509 | 1509 | PyObject *encodedir(PyObject *self, PyObject *args); |
|
1510 | 1510 | PyObject *pathencode(PyObject *self, PyObject *args); |
|
1511 | PyObject *lowerencode(PyObject *self, PyObject *args); | |
|
1511 | 1512 | |
|
1512 | 1513 | static PyMethodDef methods[] = { |
|
1513 | 1514 | {"pack_dirstate", pack_dirstate, METH_VARARGS, "pack a dirstate\n"}, |
@@ -1516,6 +1517,7 b' static PyMethodDef methods[] = {' | |||
|
1516 | 1517 | {"parse_index2", parse_index2, METH_VARARGS, "parse a revlog index\n"}, |
|
1517 | 1518 | {"encodedir", encodedir, METH_VARARGS, "encodedir a path\n"}, |
|
1518 | 1519 | {"pathencode", pathencode, METH_VARARGS, "fncache-encode a path\n"}, |
|
1520 | {"lowerencode", lowerencode, METH_VARARGS, "lower-encode a path\n"}, | |
|
1519 | 1521 | {NULL, NULL} |
|
1520 | 1522 | }; |
|
1521 | 1523 |
@@ -6,7 +6,7 b'' | |||
|
6 | 6 | # This software may be used and distributed according to the terms of the |
|
7 | 7 | # GNU General Public License version 2 or any later version. |
|
8 | 8 | |
|
9 | import cStringIO, email.Parser, os, errno, re | |
|
9 | import cStringIO, email.Parser, os, errno, re, posixpath | |
|
10 | 10 | import tempfile, zlib, shutil |
|
11 | 11 | |
|
12 | 12 | from i18n import _ |
@@ -439,11 +439,7 b' class fsbackend(abstractbackend):' | |||
|
439 | 439 | util.setflags(self._join(fname), False, True) |
|
440 | 440 | |
|
441 | 441 | def unlink(self, fname): |
|
442 | try: | |
|
443 | util.unlinkpath(self._join(fname)) | |
|
444 | except OSError, inst: | |
|
445 | if inst.errno != errno.ENOENT: | |
|
446 | raise | |
|
442 | util.unlinkpath(self._join(fname), ignoremissing=True) | |
|
447 | 443 | |
|
448 | 444 | def writerej(self, fname, failed, total, lines): |
|
449 | 445 | fname = fname + ".rej" |
@@ -1007,7 +1003,7 b' class hunk(object):' | |||
|
1007 | 1003 | |
|
1008 | 1004 | bot = min(fuzz, bot) |
|
1009 | 1005 | top = min(fuzz, top) |
|
1010 | return old[top:len(old)-bot], new[top:len(new)-bot], top | |
|
1006 | return old[top:len(old) - bot], new[top:len(new) - bot], top | |
|
1011 | 1007 | return old, new, 0 |
|
1012 | 1008 | |
|
1013 | 1009 | def fuzzit(self, fuzz, toponly): |
@@ -1514,44 +1510,6 b' def changedfiles(ui, repo, patchpath, st' | |||
|
1514 | 1510 | finally: |
|
1515 | 1511 | fp.close() |
|
1516 | 1512 | |
|
1517 | def b85diff(to, tn): | |
|
1518 | '''print base85-encoded binary diff''' | |
|
1519 | def gitindex(text): | |
|
1520 | if not text: | |
|
1521 | return hex(nullid) | |
|
1522 | l = len(text) | |
|
1523 | s = util.sha1('blob %d\0' % l) | |
|
1524 | s.update(text) | |
|
1525 | return s.hexdigest() | |
|
1526 | ||
|
1527 | def fmtline(line): | |
|
1528 | l = len(line) | |
|
1529 | if l <= 26: | |
|
1530 | l = chr(ord('A') + l - 1) | |
|
1531 | else: | |
|
1532 | l = chr(l - 26 + ord('a') - 1) | |
|
1533 | return '%c%s\n' % (l, base85.b85encode(line, True)) | |
|
1534 | ||
|
1535 | def chunk(text, csize=52): | |
|
1536 | l = len(text) | |
|
1537 | i = 0 | |
|
1538 | while i < l: | |
|
1539 | yield text[i:i + csize] | |
|
1540 | i += csize | |
|
1541 | ||
|
1542 | tohash = gitindex(to) | |
|
1543 | tnhash = gitindex(tn) | |
|
1544 | if tohash == tnhash: | |
|
1545 | return "" | |
|
1546 | ||
|
1547 | # TODO: deltas | |
|
1548 | ret = ['index %s..%s\nGIT binary patch\nliteral %s\n' % | |
|
1549 | (tohash, tnhash, len(tn))] | |
|
1550 | for l in chunk(zlib.compress(tn)): | |
|
1551 | ret.append(fmtline(l)) | |
|
1552 | ret.append('\n') | |
|
1553 | return ''.join(ret) | |
|
1554 | ||
|
1555 | 1513 | class GitDiffRequired(Exception): |
|
1556 | 1514 | pass |
|
1557 | 1515 | |
@@ -1622,9 +1580,8 b' def diff(repo, node1=None, node2=None, m' | |||
|
1622 | 1580 | return [] |
|
1623 | 1581 | |
|
1624 | 1582 | revs = None |
|
1625 | if not repo.ui.quiet: | |
|
1626 | hexfunc = repo.ui.debugflag and hex or short | |
|
1627 | revs = [hexfunc(node) for node in [node1, node2] if node] | |
|
1583 | hexfunc = repo.ui.debugflag and hex or short | |
|
1584 | revs = [hexfunc(node) for node in [node1, node2] if node] | |
|
1628 | 1585 | |
|
1629 | 1586 | copy = {} |
|
1630 | 1587 | if opts.git or opts.upgrade: |
@@ -1690,17 +1647,45 b' def diffui(*args, **kw):' | |||
|
1690 | 1647 | '''like diff(), but yields 2-tuples of (output, label) for ui.write()''' |
|
1691 | 1648 | return difflabel(diff, *args, **kw) |
|
1692 | 1649 | |
|
1693 | ||
|
1694 | def _addmodehdr(header, omode, nmode): | |
|
1695 | if omode != nmode: | |
|
1696 | header.append('old mode %s\n' % omode) | |
|
1697 | header.append('new mode %s\n' % nmode) | |
|
1698 | ||
|
1699 | 1650 | def trydiff(repo, revs, ctx1, ctx2, modified, added, removed, |
|
1700 | 1651 | copy, getfilectx, opts, losedatafn, prefix): |
|
1701 | 1652 | |
|
1702 | 1653 | def join(f): |
|
1703 | return os.path.join(prefix, f) | |
|
1654 | return posixpath.join(prefix, f) | |
|
1655 | ||
|
1656 | def addmodehdr(header, omode, nmode): | |
|
1657 | if omode != nmode: | |
|
1658 | header.append('old mode %s\n' % omode) | |
|
1659 | header.append('new mode %s\n' % nmode) | |
|
1660 | ||
|
1661 | def addindexmeta(meta, revs): | |
|
1662 | if opts.git: | |
|
1663 | i = len(revs) | |
|
1664 | if i==2: | |
|
1665 | meta.append('index %s..%s\n' % tuple(revs)) | |
|
1666 | elif i==3: | |
|
1667 | meta.append('index %s,%s..%s\n' % tuple(revs)) | |
|
1668 | ||
|
1669 | def gitindex(text): | |
|
1670 | if not text: | |
|
1671 | return hex(nullid) | |
|
1672 | l = len(text) | |
|
1673 | s = util.sha1('blob %d\0' % l) | |
|
1674 | s.update(text) | |
|
1675 | return s.hexdigest() | |
|
1676 | ||
|
1677 | def diffline(a, b, revs): | |
|
1678 | if opts.git: | |
|
1679 | line = 'diff --git a/%s b/%s\n' % (a, b) | |
|
1680 | elif not repo.ui.quiet: | |
|
1681 | if revs: | |
|
1682 | revinfo = ' '.join(["-r %s" % rev for rev in revs]) | |
|
1683 | line = 'diff %s %s\n' % (revinfo, a) | |
|
1684 | else: | |
|
1685 | line = 'diff %s\n' % a | |
|
1686 | else: | |
|
1687 | line = '' | |
|
1688 | return line | |
|
1704 | 1689 | |
|
1705 | 1690 | date1 = util.datestr(ctx1.date()) |
|
1706 | 1691 | man1 = ctx1.manifest() |
@@ -1733,7 +1718,7 b' def trydiff(repo, revs, ctx1, ctx2, modi' | |||
|
1733 | 1718 | else: |
|
1734 | 1719 | a = copyto[f] |
|
1735 | 1720 | omode = gitmode[man1.flags(a)] |
|
1736 | _addmodehdr(header, omode, mode) | |
|
1721 | addmodehdr(header, omode, mode) | |
|
1737 | 1722 | if a in removed and a not in gone: |
|
1738 | 1723 | op = 'rename' |
|
1739 | 1724 | gone.add(a) |
@@ -1779,22 +1764,24 b' def trydiff(repo, revs, ctx1, ctx2, modi' | |||
|
1779 | 1764 | nflag = ctx2.flags(f) |
|
1780 | 1765 | binary = util.binary(to) or util.binary(tn) |
|
1781 | 1766 | if opts.git: |
|
1782 | _addmodehdr(header, gitmode[oflag], gitmode[nflag]) | |
|
1767 | addmodehdr(header, gitmode[oflag], gitmode[nflag]) | |
|
1783 | 1768 | if binary: |
|
1784 | 1769 | dodiff = 'binary' |
|
1785 | 1770 | elif binary or nflag != oflag: |
|
1786 | 1771 | losedatafn(f) |
|
1787 | if opts.git: | |
|
1788 | header.insert(0, mdiff.diffline(revs, join(a), join(b), opts)) | |
|
1789 | 1772 | |
|
1790 | 1773 | if dodiff: |
|
1774 | if opts.git or revs: | |
|
1775 | header.insert(0, diffline(join(a), join(b), revs)) | |
|
1791 | 1776 | if dodiff == 'binary': |
|
1792 | text = b85diff(to, tn) | |
|
1777 | text = mdiff.b85diff(to, tn) | |
|
1778 | if text: | |
|
1779 | addindexmeta(header, [gitindex(to), gitindex(tn)]) | |
|
1793 | 1780 | else: |
|
1794 | 1781 | text = mdiff.unidiff(to, date1, |
|
1795 | 1782 | # ctx2 date may be dynamic |
|
1796 | 1783 | tn, util.datestr(ctx2.date()), |
|
1797 | join(a), join(b), revs, opts=opts) | |
|
1784 | join(a), join(b), opts=opts) | |
|
1798 | 1785 | if header and (text or len(header) > 1): |
|
1799 | 1786 | yield ''.join(header) |
|
1800 | 1787 | if text: |
@@ -15,6 +15,7 b'' | |||
|
15 | 15 | * required. |
|
16 | 16 | */ |
|
17 | 17 | |
|
18 | #define PY_SSIZE_T_CLEAN | |
|
18 | 19 | #include <Python.h> |
|
19 | 20 | #include <assert.h> |
|
20 | 21 | #include <ctype.h> |
@@ -481,12 +482,244 b' static Py_ssize_t basicencode(char *dest' | |||
|
481 | 482 | |
|
482 | 483 | static const Py_ssize_t maxstorepathlen = 120; |
|
483 | 484 | |
|
485 | static Py_ssize_t _lowerencode(char *dest, size_t destsize, | |
|
486 | const char *src, Py_ssize_t len) | |
|
487 | { | |
|
488 | static const uint32_t onebyte[8] = { | |
|
489 | 1, 0x2bfffbfb, 0xe8000001, 0x2fffffff | |
|
490 | }; | |
|
491 | ||
|
492 | static const uint32_t lower[8] = { 0, 0, 0x7fffffe }; | |
|
493 | ||
|
494 | Py_ssize_t i, destlen = 0; | |
|
495 | ||
|
496 | for (i = 0; i < len; i++) { | |
|
497 | if (inset(onebyte, src[i])) | |
|
498 | charcopy(dest, &destlen, destsize, src[i]); | |
|
499 | else if (inset(lower, src[i])) | |
|
500 | charcopy(dest, &destlen, destsize, src[i] + 32); | |
|
501 | else | |
|
502 | escape3(dest, &destlen, destsize, src[i]); | |
|
503 | } | |
|
504 | ||
|
505 | return destlen; | |
|
506 | } | |
|
507 | ||
|
508 | PyObject *lowerencode(PyObject *self, PyObject *args) | |
|
509 | { | |
|
510 | char *path; | |
|
511 | Py_ssize_t len, newlen; | |
|
512 | PyObject *ret; | |
|
513 | ||
|
514 | if (!PyArg_ParseTuple(args, "s#:lowerencode", &path, &len)) | |
|
515 | return NULL; | |
|
516 | ||
|
517 | newlen = _lowerencode(NULL, 0, path, len); | |
|
518 | ret = PyString_FromStringAndSize(NULL, newlen); | |
|
519 | if (ret) | |
|
520 | newlen = _lowerencode(PyString_AS_STRING(ret), newlen, | |
|
521 | path, len); | |
|
522 | ||
|
523 | return ret; | |
|
524 | } | |
|
525 | ||
|
526 | /* See store.py:_auxencode for a description. */ | |
|
527 | static Py_ssize_t auxencode(char *dest, size_t destsize, | |
|
528 | const char *src, Py_ssize_t len) | |
|
529 | { | |
|
530 | static const uint32_t twobytes[8]; | |
|
531 | ||
|
532 | static const uint32_t onebyte[8] = { | |
|
533 | ~0, 0xffff3ffe, ~0, ~0, ~0, ~0, ~0, ~0, | |
|
534 | }; | |
|
535 | ||
|
536 | return _encode(twobytes, onebyte, dest, 0, destsize, src, len, 0); | |
|
537 | } | |
|
538 | ||
|
539 | static PyObject *hashmangle(const char *src, Py_ssize_t len, const char sha[20]) | |
|
540 | { | |
|
541 | static const Py_ssize_t dirprefixlen = 8; | |
|
542 | static const Py_ssize_t maxshortdirslen = 68; | |
|
543 | char *dest; | |
|
544 | PyObject *ret; | |
|
545 | ||
|
546 | Py_ssize_t i, d, p, lastslash = len - 1, lastdot = -1; | |
|
547 | Py_ssize_t destsize, destlen = 0, slop, used; | |
|
548 | ||
|
549 | while (lastslash >= 0 && src[lastslash] != '/') { | |
|
550 | if (src[lastslash] == '.' && lastdot == -1) | |
|
551 | lastdot = lastslash; | |
|
552 | lastslash--; | |
|
553 | } | |
|
554 | ||
|
555 | #if 0 | |
|
556 | /* All paths should end in a suffix of ".i" or ".d". | |
|
557 | Unfortunately, the file names in test-hybridencode.py | |
|
558 | violate this rule. */ | |
|
559 | if (lastdot != len - 3) { | |
|
560 | PyErr_SetString(PyExc_ValueError, | |
|
561 | "suffix missing or wrong length"); | |
|
562 | return NULL; | |
|
563 | } | |
|
564 | #endif | |
|
565 | ||
|
566 | /* If src contains a suffix, we will append it to the end of | |
|
567 | the new string, so make room. */ | |
|
568 | destsize = 120; | |
|
569 | if (lastdot >= 0) | |
|
570 | destsize += len - lastdot - 1; | |
|
571 | ||
|
572 | ret = PyString_FromStringAndSize(NULL, destsize); | |
|
573 | if (ret == NULL) | |
|
574 | return NULL; | |
|
575 | ||
|
576 | dest = PyString_AS_STRING(ret); | |
|
577 | memcopy(dest, &destlen, destsize, "dh/", 3); | |
|
578 | ||
|
579 | /* Copy up to dirprefixlen bytes of each path component, up to | |
|
580 | a limit of maxshortdirslen bytes. */ | |
|
581 | for (i = d = p = 0; i < lastslash; i++, p++) { | |
|
582 | if (src[i] == '/') { | |
|
583 | char d = dest[destlen - 1]; | |
|
584 | /* After truncation, a directory name may end | |
|
585 | in a space or dot, which are unportable. */ | |
|
586 | if (d == '.' || d == ' ') | |
|
587 | dest[destlen - 1] = '_'; | |
|
588 | if (destlen > maxshortdirslen) | |
|
589 | break; | |
|
590 | charcopy(dest, &destlen, destsize, src[i]); | |
|
591 | p = -1; | |
|
592 | } | |
|
593 | else if (p < dirprefixlen) | |
|
594 | charcopy(dest, &destlen, destsize, src[i]); | |
|
595 | } | |
|
596 | ||
|
597 | /* Rewind to just before the last slash copied. */ | |
|
598 | if (destlen > maxshortdirslen + 3) | |
|
599 | do { | |
|
600 | destlen--; | |
|
601 | } while (destlen > 0 && dest[destlen] != '/'); | |
|
602 | ||
|
603 | if (destlen > 3) { | |
|
604 | if (lastslash > 0) { | |
|
605 | char d = dest[destlen - 1]; | |
|
606 | /* The last directory component may be | |
|
607 | truncated, so make it safe. */ | |
|
608 | if (d == '.' || d == ' ') | |
|
609 | dest[destlen - 1] = '_'; | |
|
610 | } | |
|
611 | ||
|
612 | charcopy(dest, &destlen, destsize, '/'); | |
|
613 | } | |
|
614 | ||
|
615 | /* Add a prefix of the original file's name. Its length | |
|
616 | depends on the number of bytes left after accounting for | |
|
617 | hash and suffix. */ | |
|
618 | used = destlen + 40; | |
|
619 | if (lastdot >= 0) | |
|
620 | used += len - lastdot - 1; | |
|
621 | slop = maxstorepathlen - used; | |
|
622 | if (slop > 0) { | |
|
623 | Py_ssize_t basenamelen = | |
|
624 | lastslash >= 0 ? len - lastslash - 2 : len - 1; | |
|
625 | ||
|
626 | if (basenamelen > slop) | |
|
627 | basenamelen = slop; | |
|
628 | if (basenamelen > 0) | |
|
629 | memcopy(dest, &destlen, destsize, &src[lastslash + 1], | |
|
630 | basenamelen); | |
|
631 | } | |
|
632 | ||
|
633 | /* Add hash and suffix. */ | |
|
634 | for (i = 0; i < 20; i++) | |
|
635 | hexencode(dest, &destlen, destsize, sha[i]); | |
|
636 | ||
|
637 | if (lastdot >= 0) | |
|
638 | memcopy(dest, &destlen, destsize, &src[lastdot], | |
|
639 | len - lastdot - 1); | |
|
640 | ||
|
641 | PyString_GET_SIZE(ret) = destlen; | |
|
642 | ||
|
643 | return ret; | |
|
644 | } | |
|
645 | ||
|
484 | 646 | /* |
|
485 | * We currently implement only basic encoding. | |
|
486 | * | |
|
487 | * If a name is too long to encode due to Windows path name limits, | |
|
488 | * this function returns None. | |
|
647 | * Avoiding a trip through Python would improve performance by 50%, | |
|
648 | * but we don't encounter enough long names to be worth the code. | |
|
489 | 649 | */ |
|
650 | static int sha1hash(char hash[20], const char *str, Py_ssize_t len) | |
|
651 | { | |
|
652 | static PyObject *shafunc; | |
|
653 | PyObject *shaobj, *hashobj; | |
|
654 | ||
|
655 | if (shafunc == NULL) { | |
|
656 | PyObject *util, *name = PyString_FromString("mercurial.util"); | |
|
657 | ||
|
658 | if (name == NULL) | |
|
659 | return -1; | |
|
660 | ||
|
661 | util = PyImport_Import(name); | |
|
662 | Py_DECREF(name); | |
|
663 | ||
|
664 | if (util == NULL) { | |
|
665 | PyErr_SetString(PyExc_ImportError, "mercurial.util"); | |
|
666 | return -1; | |
|
667 | } | |
|
668 | shafunc = PyObject_GetAttrString(util, "sha1"); | |
|
669 | Py_DECREF(util); | |
|
670 | ||
|
671 | if (shafunc == NULL) { | |
|
672 | PyErr_SetString(PyExc_AttributeError, | |
|
673 | "module 'mercurial.util' has no " | |
|
674 | "attribute 'sha1'"); | |
|
675 | return -1; | |
|
676 | } | |
|
677 | } | |
|
678 | ||
|
679 | shaobj = PyObject_CallFunction(shafunc, "s#", str, len); | |
|
680 | ||
|
681 | if (shaobj == NULL) | |
|
682 | return -1; | |
|
683 | ||
|
684 | hashobj = PyObject_CallMethod(shaobj, "digest", ""); | |
|
685 | Py_DECREF(shaobj); | |
|
686 | ||
|
687 | if (!PyString_Check(hashobj) || PyString_GET_SIZE(hashobj) != 20) { | |
|
688 | PyErr_SetString(PyExc_TypeError, | |
|
689 | "result of digest is not a 20-byte hash"); | |
|
690 | Py_DECREF(hashobj); | |
|
691 | return -1; | |
|
692 | } | |
|
693 | ||
|
694 | memcpy(hash, PyString_AS_STRING(hashobj), 20); | |
|
695 | Py_DECREF(hashobj); | |
|
696 | return 0; | |
|
697 | } | |
|
698 | ||
|
699 | #define MAXENCODE 4096 * 3 | |
|
700 | ||
|
701 | static PyObject *hashencode(const char *src, Py_ssize_t len) | |
|
702 | { | |
|
703 | char dired[MAXENCODE]; | |
|
704 | char lowered[MAXENCODE]; | |
|
705 | char auxed[MAXENCODE]; | |
|
706 | Py_ssize_t dirlen, lowerlen, auxlen, baselen; | |
|
707 | char sha[20]; | |
|
708 | ||
|
709 | baselen = (len - 5) * 3; | |
|
710 | if (baselen >= MAXENCODE) { | |
|
711 | PyErr_SetString(PyExc_ValueError, "string too long"); | |
|
712 | return NULL; | |
|
713 | } | |
|
714 | ||
|
715 | dirlen = _encodedir(dired, baselen, src, len); | |
|
716 | if (sha1hash(sha, dired, dirlen - 1) == -1) | |
|
717 | return NULL; | |
|
718 | lowerlen = _lowerencode(lowered, baselen, dired + 5, dirlen - 5); | |
|
719 | auxlen = auxencode(auxed, baselen, lowered, lowerlen); | |
|
720 | return hashmangle(auxed, auxlen, sha); | |
|
721 | } | |
|
722 | ||
|
490 | 723 | PyObject *pathencode(PyObject *self, PyObject *args) |
|
491 | 724 | { |
|
492 | 725 | Py_ssize_t len, newlen; |
@@ -501,13 +734,10 b' PyObject *pathencode(PyObject *self, PyO' | |||
|
501 | 734 | return NULL; |
|
502 | 735 | } |
|
503 | 736 | |
|
504 | if (len > maxstorepathlen) { | |
|
505 | newobj = Py_None; | |
|
506 | Py_INCREF(newobj); | |
|
507 | return newobj; | |
|
508 | } | |
|
509 | ||
|
510 | newlen = len ? basicencode(NULL, 0, path, len + 1) : 1; | |
|
737 | if (len > maxstorepathlen) | |
|
738 | newlen = maxstorepathlen + 2; | |
|
739 | else | |
|
740 | newlen = len ? basicencode(NULL, 0, path, len + 1) : 1; | |
|
511 | 741 | |
|
512 | 742 | if (newlen <= maxstorepathlen + 1) { |
|
513 | 743 | if (newlen == len + 1) { |
@@ -522,10 +752,9 b' PyObject *pathencode(PyObject *self, PyO' | |||
|
522 | 752 | basicencode(PyString_AS_STRING(newobj), newlen, path, |
|
523 | 753 | len + 1); |
|
524 | 754 | } |
|
525 | } else { | |
|
526 | newobj = Py_None; | |
|
527 | Py_INCREF(newobj); | |
|
528 | 755 | } |
|
756 | else | |
|
757 | newobj = hashencode(path, len + 1); | |
|
529 | 758 | |
|
530 | 759 | return newobj; |
|
531 | 760 | } |
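
hashencode() above implements the "dh/" form of the hashed store layout: up to 8 bytes of each directory name (capped at 68 bytes of directories in total), a prefix of the file name sized so the result stays within 120 bytes once the 40-hex-digit SHA-1 and the suffix are appended, and '_' substituted for unportable trailing '.' or ' ' in truncated directory names. A rough Python rendering of the same scheme, simplified (assumption: the real encoder dir-, lower- and aux-encodes the name before mangling; this sketch skips that step):

    import hashlib

    def hashencode_sketch(path):
        maxstorepathlen, dirprefixlen, maxshortdirslen = 120, 8, 68
        sha = hashlib.sha1(path).hexdigest()      # 40 hex digits
        dirs, sep, basename = path.rpartition('/')
        parts, used = [], 0
        for d in (dirs.split('/') if dirs else []):
            d = d[:dirprefixlen]
            if d and d[-1] in '. ':               # unportable on Windows
                d = d[:-1] + '_'
            if used + len(d) + 1 > maxshortdirslen:
                break
            parts.append(d)
            used += len(d) + 1
        prefix = ''.join(p + '/' for p in parts)
        dot = basename.rfind('.')
        suffix = dot > 0 and basename[dot:] or ''
        room = maxstorepathlen - len('dh/') - len(prefix) - 40 - len(suffix)
        return 'dh/' + prefix + basename[:max(room, 0)] + sha + suffix

    print hashencode_sketch('data/a-very/deeply/nested/path/longfilename.i')
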
@@ -104,30 +104,11 b' import errno' | |||
|
104 | 104 | from node import nullid, nullrev, bin, hex, short |
|
105 | 105 | from i18n import _ |
|
106 | 106 | import util, error |
|
107 | import obsolete | |
|
108 | 107 | |
|
109 | 108 | allphases = public, draft, secret = range(3) |
|
110 | 109 | trackedphases = allphases[1:] |
|
111 | 110 | phasenames = ['public', 'draft', 'secret'] |
|
112 | 111 | |
|
113 | def _filterunknown(ui, changelog, phaseroots): | |
|
114 | """remove unknown nodes from the phase boundary | |
|
115 | ||
|
116 | Nothing is lost as unknown nodes only hold data for their descendants. | |
|
117 | """ | |
|
118 | updated = False | |
|
119 | nodemap = changelog.nodemap # to filter unknown nodes | |
|
120 | for phase, nodes in enumerate(phaseroots): | |
|
121 | missing = [node for node in nodes if node not in nodemap] | |
|
122 | if missing: | |
|
123 | for mnode in missing: | |
|
124 | ui.debug( | |
|
125 | 'removing unknown node %s from %i-phase boundary\n' | |
|
126 | % (short(mnode), phase)) | |
|
127 | nodes.symmetric_difference_update(missing) | |
|
128 | updated = True | |
|
129 | return updated | |
|
130 | ||
|
131 | 112 | def _readroots(repo, phasedefaults=None): |
|
132 | 113 | """Read phase roots from disk |
|
133 | 114 | |
@@ -139,6 +120,7 b' def _readroots(repo, phasedefaults=None)' | |||
|
139 | 120 | Return (roots, dirty) where dirty is true if roots differ from |
|
140 | 121 | what is being stored. |
|
141 | 122 | """ |
|
123 | repo = repo.unfiltered() | |
|
142 | 124 | dirty = False |
|
143 | 125 | roots = [set() for i in allphases] |
|
144 | 126 | try: |
@@ -156,8 +138,6 b' def _readroots(repo, phasedefaults=None)' | |||
|
156 | 138 | for f in phasedefaults: |
|
157 | 139 | roots = f(repo, roots) |
|
158 | 140 | dirty = True |
|
159 | if _filterunknown(repo.ui, repo.changelog, roots): | |
|
160 | dirty = True | |
|
161 | 141 | return roots, dirty |
|
162 | 142 | |
|
163 | 143 | class phasecache(object): |
@@ -165,8 +145,9 b' class phasecache(object):' | |||
|
165 | 145 | if _load: |
|
166 | 146 | # Cheap trick to allow shallow-copy without copy module |
|
167 | 147 | self.phaseroots, self.dirty = _readroots(repo, phasedefaults) |
|
148 | self._phaserevs = None | |
|
149 | self.filterunknown(repo) | |
|
168 | 150 | self.opener = repo.sopener |
|
169 | self._phaserevs = None | |
|
170 | 151 | |
|
171 | 152 | def copy(self): |
|
172 | 153 | # Shallow copy meant to ensure isolation in |
@@ -184,6 +165,7 b' class phasecache(object):' | |||
|
184 | 165 | |
|
185 | 166 | def getphaserevs(self, repo, rebuild=False): |
|
186 | 167 | if rebuild or self._phaserevs is None: |
|
168 | repo = repo.unfiltered() | |
|
187 | 169 | revs = [public] * len(repo.changelog) |
|
188 | 170 | for phase in trackedphases: |
|
189 | 171 | roots = map(repo.changelog.rev, self.phaseroots[phase]) |
@@ -228,6 +210,7 b' class phasecache(object):' | |||
|
228 | 210 | # Be careful to preserve shallow-copied values: do not update |
|
229 | 211 | # phaseroots values, replace them. |
|
230 | 212 | |
|
213 | repo = repo.unfiltered() | |
|
231 | 214 | delroots = [] # set of root deleted by this path |
|
232 | 215 | for phase in xrange(targetphase + 1, len(allphases)): |
|
233 | 216 | # filter nodes that are not in a compatible phase already |
@@ -245,12 +228,13 b' class phasecache(object):' | |||
|
245 | 228 | # declare deleted root in the target phase |
|
246 | 229 | if targetphase != 0: |
|
247 | 230 | self.retractboundary(repo, targetphase, delroots) |
|
248 | obsolete.clearobscaches(repo) | |
|
231 | repo.invalidatevolatilesets() | |
|
249 | 232 | |
|
250 | 233 | def retractboundary(self, repo, targetphase, nodes): |
|
251 | 234 | # Be careful to preserve shallow-copied values: do not update |
|
252 | 235 | # phaseroots values, replace them. |
|
253 | 236 | |
|
237 | repo = repo.unfiltered() | |
|
254 | 238 | currentroots = self.phaseroots[targetphase] |
|
255 | 239 | newroots = [n for n in nodes |
|
256 | 240 | if self.phase(repo, repo[n].rev()) < targetphase] |
@@ -262,7 +246,27 b' class phasecache(object):' | |||
|
262 | 246 | ctxs = repo.set('roots(%ln::)', currentroots) |
|
263 | 247 | currentroots.intersection_update(ctx.node() for ctx in ctxs) |
|
264 | 248 | self._updateroots(targetphase, currentroots) |
|
265 | obsolete.clearobscaches(repo) | |
|
249 | repo.invalidatevolatilesets() | |
|
250 | ||
|
251 | def filterunknown(self, repo): | |
|
252 | """remove unknown nodes from the phase boundary | |
|
253 | ||
|
254 | Nothing is lost as unknown nodes only hold data for their descendants. | |
|
255 | """ | |
|
256 | filtered = False | |
|
257 | nodemap = repo.changelog.nodemap # to filter unknown nodes | |
|
258 | for phase, nodes in enumerate(self.phaseroots): | |
|
259 | missing = [node for node in nodes if node not in nodemap] | |
|
260 | if missing: | |
|
261 | for mnode in missing: | |
|
262 | repo.ui.debug( | |
|
263 | 'removing unknown node %s from %i-phase boundary\n' | |
|
264 | % (short(mnode), phase)) | |
|
265 | nodes.symmetric_difference_update(missing) | |
|
266 | filtered = True | |
|
267 | if filtered: | |
|
268 | self.dirty = True | |
|
269 | self._phaserevs = None | |
|
266 | 270 | |
|
267 | 271 | def advanceboundary(repo, targetphase, nodes): |
|
268 | 272 | """Add nodes to a phase changing other nodes phases if necessary. |
@@ -316,6 +320,7 b' def listphases(repo):' | |||
|
316 | 320 | |
|
317 | 321 | def pushphase(repo, nhex, oldphasestr, newphasestr): |
|
318 | 322 | """List phases root for serialization over pushkey""" |
|
323 | repo = repo.unfiltered() | |
|
319 | 324 | lock = repo.lock() |
|
320 | 325 | try: |
|
321 | 326 | currentphase = repo[nhex].phase() |
@@ -340,6 +345,7 b' def analyzeremotephases(repo, subset, ro' | |||
|
340 | 345 | |
|
341 | 346 | Accept unknown element input |
|
342 | 347 | """ |
|
348 | repo = repo.unfiltered() | |
|
343 | 349 | # build list from dictionary |
|
344 | 350 | draftroots = [] |
|
345 | 351 | nodemap = repo.changelog.nodemap # to filter unknown nodes |
@@ -367,6 +373,7 b' def newheads(repo, heads, roots):' | |||
|
367 | 373 | |
|
368 | 374 | * `heads`: define the first subset |
|
369 | 375 | * `roots`: define the second we subtract from the first""" |
|
376 | repo = repo.unfiltered() | |
|
370 | 377 | revset = repo.set('heads((%ln + parents(%ln)) - (%ln::%ln))', |
|
371 | 378 | heads, roots, roots, heads) |
|
372 | 379 | return [c.node() for c in revset] |
@@ -7,7 +7,7 b'' | |||
|
7 | 7 | |
|
8 | 8 | from i18n import _ |
|
9 | 9 | import encoding |
|
10 | import os, sys, errno, stat, getpass, pwd, grp, tempfile, unicodedata | |
|
10 | import os, sys, errno, stat, getpass, pwd, grp, socket, tempfile, unicodedata | |
|
11 | 11 | |
|
12 | 12 | posixfile = open |
|
13 | 13 | normpath = os.path.normpath |
@@ -21,14 +21,26 b' umask = os.umask(0)' | |||
|
21 | 21 | os.umask(umask) |
|
22 | 22 | |
|
23 | 23 | def split(p): |
|
24 | '''Same as os.path.split, but faster''' | |
|
24 | '''Same as posixpath.split, but faster | |
|
25 | ||
|
26 | >>> import posixpath | |
|
27 | >>> for f in ['/absolute/path/to/file', | |
|
28 | ... 'relative/path/to/file', | |
|
29 | ... 'file_alone', | |
|
30 | ... 'path/to/directory/', | |
|
31 | ... '/multiple/path//separators', | |
|
32 | ... '/file_at_root', | |
|
33 | ... '///multiple_leading_separators_at_root', | |
|
34 | ... '']: | |
|
35 | ... assert split(f) == posixpath.split(f), f | |
|
36 | ''' | |
|
25 | 37 | ht = p.rsplit('/', 1) |
|
26 | 38 | if len(ht) == 1: |
|
27 | 39 | return '', p |
|
28 | 40 | nh = ht[0].rstrip('/') |
|
29 | 41 | if nh: |
|
30 | 42 | return nh, ht[1] |
|
31 | return ht | |
|
43 | return ht[0] + '/', ht[1] | |
|
32 | 44 | |
|
33 | 45 | def openhardlinks(): |
|
34 | 46 | '''return true if it is safe to hold open file handles to hardlinks''' |
@@ -352,12 +364,18 b' def findexe(command):' | |||
|
352 | 364 | def setsignalhandler(): |
|
353 | 365 | pass |
|
354 | 366 | |
|
367 | _wantedkinds = set([stat.S_IFREG, stat.S_IFLNK]) | |
|
368 | ||
|
355 | 369 | def statfiles(files): |
|
356 | 'Stat each file in files and yield stat or None if file does not exist.' | |
|
370 | '''Stat each file in files. Yield each stat, or None if a file does not | |
|
371 | exist or has a type we don't care about.''' | |
|
357 | 372 | lstat = os.lstat |
|
373 | getkind = stat.S_IFMT | |
|
358 | 374 | for nf in files: |
|
359 | 375 | try: |
|
360 | 376 | st = lstat(nf) |
|
377 | if getkind(st.st_mode) not in _wantedkinds: | |
|
378 | st = None | |
|
361 | 379 | except OSError, err: |
|
362 | 380 | if err.errno not in (errno.ENOENT, errno.ENOTDIR): |
|
363 | 381 | raise |
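
Only S_IFREG and S_IFLNK survive the new filter: directories, FIFOs and sockets now come back as None, exactly like missing files, which is all the dirstate needs to know. A small usage sketch (assumptions: POSIX platform with this module importable as mercurial.posix; the file names are illustrative):

    import stat
    from mercurial.posix import statfiles    # the generator defined above

    names = ['a.txt', 'missing.txt', 'some-directory']
    for name, st in zip(names, statfiles(names)):
        if st is None:
            print name, '-> missing, or not a regular file/symlink'
        else:
            kind = stat.S_ISLNK(st.st_mode) and 'symlink' or 'file'
            print name, '->', kind, st.st_size, 'bytes'
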
@@ -437,9 +455,13 b' def termwidth():' | |||
|
437 | 455 | def makedir(path, notindexed): |
|
438 | 456 | os.mkdir(path) |
|
439 | 457 | |
|
440 | def unlinkpath(f): | |
|
458 | def unlinkpath(f, ignoremissing=False): | |
|
441 | 459 | """unlink and remove the directory if it is empty""" |
|
442 | os.unlink(f) | |
|
460 | try: | |
|
461 | os.unlink(f) | |
|
462 | except OSError, e: | |
|
463 | if not (ignoremissing and e.errno == errno.ENOENT): | |
|
464 | raise | |
|
443 | 465 | # try removing directories that might now be empty |
|
444 | 466 | try: |
|
445 | 467 | os.removedirs(os.path.dirname(f)) |
@@ -468,7 +490,20 b' class cachestat(object):' | |||
|
468 | 490 | |
|
469 | 491 | def __eq__(self, other): |
|
470 | 492 | try: |
|
471 | return self.stat == other.stat | |
|
493 | # Only dev, ino, size, mtime and atime are likely to change. Out | |
|
494 | # of these, we shouldn't compare atime but should compare the | |
|
495 | # rest. However, one of the other fields changing indicates | |
|
496 | # something fishy going on, so return False if anything but atime | |
|
497 | # changes. | |
|
498 | return (self.stat.st_mode == other.stat.st_mode and | |
|
499 | self.stat.st_ino == other.stat.st_ino and | |
|
500 | self.stat.st_dev == other.stat.st_dev and | |
|
501 | self.stat.st_nlink == other.stat.st_nlink and | |
|
502 | self.stat.st_uid == other.stat.st_uid and | |
|
503 | self.stat.st_gid == other.stat.st_gid and | |
|
504 | self.stat.st_size == other.stat.st_size and | |
|
505 | self.stat.st_mtime == other.stat.st_mtime and | |
|
506 | self.stat.st_ctime == other.stat.st_ctime) | |
|
472 | 507 | except AttributeError: |
|
473 | 508 | return False |
|
474 | 509 | |
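
The field-by-field comparison above deliberately skips st_atime: merely reading a file updates its atime, so including it would invalidate the cache on every read. A standalone sketch of the same idea for a single file (hypothetical helper names):

    import os

    _FIELDS = ('st_mode', 'st_ino', 'st_dev', 'st_nlink', 'st_uid',
               'st_gid', 'st_size', 'st_mtime', 'st_ctime')

    def snapshot(path):
        st = os.stat(path)
        return tuple(getattr(st, f) for f in _FIELDS)

    def changed(path, old):
        """True if anything but atime differs from the old snapshot."""
        return snapshot(path) != old
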
@@ -477,3 +512,43 b' class cachestat(object):' | |||
|
477 | 512 | |
|
478 | 513 | def executablepath(): |
|
479 | 514 | return None # available on Windows only |
|
515 | ||
|
516 | class unixdomainserver(socket.socket): | |
|
517 | def __init__(self, join, subsystem): | |
|
518 | '''Create a unix domain socket with the given prefix.''' | |
|
519 | super(unixdomainserver, self).__init__(socket.AF_UNIX) | |
|
520 | sockname = subsystem + '.sock' | |
|
521 | self.realpath = self.path = join(sockname) | |
|
522 | if os.path.islink(self.path): | |
|
523 | if os.path.exists(self.path): | |
|
524 | self.realpath = os.readlink(self.path) | |
|
525 | else: | |
|
526 | os.unlink(self.path) | |
|
527 | try: | |
|
528 | self.bind(self.realpath) | |
|
529 | except socket.error, err: | |
|
530 | if err.args[0] == 'AF_UNIX path too long': | |
|
531 | tmpdir = tempfile.mkdtemp(prefix='hg-%s-' % subsystem) | |
|
532 | self.realpath = os.path.join(tmpdir, sockname) | |
|
533 | try: | |
|
534 | self.bind(self.realpath) | |
|
535 | os.symlink(self.realpath, self.path) | |
|
536 | except (OSError, socket.error): | |
|
537 | self.cleanup() | |
|
538 | raise | |
|
539 | else: | |
|
540 | raise | |
|
541 | self.listen(5) | |
|
542 | ||
|
543 | def cleanup(self): | |
|
544 | def okayifmissing(f, path): | |
|
545 | try: | |
|
546 | f(path) | |
|
547 | except OSError, err: | |
|
548 | if err.errno != errno.ENOENT: | |
|
549 | raise | |
|
550 | ||
|
551 | okayifmissing(os.unlink, self.path) | |
|
552 | if self.realpath != self.path: | |
|
553 | okayifmissing(os.unlink, self.realpath) | |
|
554 | okayifmissing(os.rmdir, os.path.dirname(self.realpath)) |
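
The interesting part of unixdomainserver is its fallback: when the repository path is too long for an AF_UNIX socket, it binds in a short temporary directory and leaves a symlink at the requested path. A hypothetical usage sketch (assumptions: POSIX platform, class importable as mercurial.posix.unixdomainserver; names are illustrative):

    import os, tempfile
    from mercurial.posix import unixdomainserver

    repodir = tempfile.mkdtemp()             # stand-in for <repo>/.hg
    join = lambda name: os.path.join(repodir, name)

    server = unixdomainserver(join, 'example')
    try:
        # path is always <repodir>/example.sock; realpath may point into
        # a temporary directory if repodir was too long to bind directly.
        print server.path, '->', server.realpath
    finally:
        server.cleanup()                     # socket, symlink and tmpdir
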
@@ -6,7 +6,7 b'' | |||
|
6 | 6 | # This software may be used and distributed according to the terms of the |
|
7 | 7 | # GNU General Public License version 2 or any later version. |
|
8 | 8 | |
|
9 | from mercurial import changegroup, bookmarks | |
|
9 | from mercurial import changegroup | |
|
10 | 10 | from mercurial.node import short |
|
11 | 11 | from mercurial.i18n import _ |
|
12 | 12 | import os |
@@ -56,10 +56,8 b' def _collectbrokencsets(repo, files, str' | |||
|
56 | 56 | return s |
|
57 | 57 | |
|
58 | 58 | def strip(ui, repo, nodelist, backup="all", topic='backup'): |
|
59 | # It simplifies the logic around updating the branchheads cache if we only | |
|
60 | # have to consider the effect of the stripped revisions and not revisions | |
|
61 | # missing because the cache is out-of-date. | |
|
62 | repo.updatebranchcache() | |
|
59 | repo = repo.unfiltered() | |
|
60 | repo.destroying() | |
|
63 | 61 | |
|
64 | 62 | cl = repo.changelog |
|
65 | 63 | # TODO handle undo of merge sets |
@@ -68,17 +66,6 b' def strip(ui, repo, nodelist, backup="al' | |||
|
68 | 66 | striplist = [cl.rev(node) for node in nodelist] |
|
69 | 67 | striprev = min(striplist) |
|
70 | 68 | |
|
71 | # Generate set of branches who will have nodes stripped. | |
|
72 | striprevs = repo.revs("%ld::", striplist) | |
|
73 | stripbranches = set([repo[rev].branch() for rev in striprevs]) | |
|
74 | ||
|
75 | # Set of potential new heads resulting from the strip. The parents of any | |
|
76 | # node removed could be a new head because the node to be removed could have | |
|
77 | # been the only child of the parent. | |
|
78 | newheadrevs = repo.revs("parents(%ld::) - %ld::", striprevs, striprevs) | |
|
79 | newheadnodes = set([cl.node(rev) for rev in newheadrevs]) | |
|
80 | newheadbranches = set([repo[rev].branch() for rev in newheadrevs]) | |
|
81 | ||
|
82 | 69 | keeppartialbundle = backup == 'strip' |
|
83 | 70 | |
|
84 | 71 | # Some revisions with rev > striprev may not be descendants of striprev. |
@@ -111,8 +98,10 b' def strip(ui, repo, nodelist, backup="al' | |||
|
111 | 98 | saverevs.difference_update(descendants) |
|
112 | 99 | savebases = [cl.node(r) for r in saverevs] |
|
113 | 100 | stripbases = [cl.node(r) for r in tostrip] |
|
114 | newbmtarget = repo.revs('sort(heads((::%ld) - (%ld)), -rev)', | |
|
115 | tostrip, tostrip) | |
|
101 | ||
|
102 | # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)), but | |
|
103 | # is much faster | |
|
104 | newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip) | |
|
116 | 105 | if newbmtarget: |
|
117 | 106 | newbmtarget = repo[newbmtarget[0]].node() |
|
118 | 107 | else: |
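The comment in this hunk compresses a small revset identity. For the stripped set s (closed under descendants), every element of parents(s) - s is a head of (::s) - s once its descendants in s are gone, and since revlog numbering is topological, max() of either set picks the same revision — but the parents() form never walks the full ancestor set. A toy check of the equivalence on a hand-built DAG (assuming, as in a revlog, that children have higher numbers than their parents):

    # rev -> tuple of parent revs for a small DAG: 0 <- 1 <- {2, 3} <- 4
    parents = {0: (), 1: (0,), 2: (1,), 3: (1,), 4: (2, 3)}

    def ancestors(revs):
        seen, stack = set(), list(revs)
        while stack:
            r = stack.pop()
            if r not in seen:
                seen.add(r)
                stack.extend(parents[r])
        return seen  # this is ::revs, including revs themselves

    s = set([3, 4])  # a strip set, closed under descendants
    viaancestors = max(ancestors(s) - s)  # max(heads(...)) == max(...) here
    viaparents = max(p for r in s for p in parents[r] if p not in s)
    assert viaancestors == viaparents == 2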
@@ -181,7 +170,7 b' def strip(ui, repo, nodelist, backup="al' | |||
|
181 | 170 | |
|
182 | 171 | for m in updatebm: |
|
183 | 172 | bm[m] = repo[newbmtarget].node() |
|
184 | bookmarks.write(repo) | |
|
173 | bm.write() | |
|
185 | 174 | except: # re-raises |
|
186 | 175 | if backupfile: |
|
187 | 176 | ui.warn(_("strip failed, full bundle stored in '%s'\n") |
@@ -191,10 +180,4 b' def strip(ui, repo, nodelist, backup="al' | |||
|
191 | 180 | % chgrpfile) |
|
192 | 181 | raise |
|
193 | 182 | |
|
194 | if len(stripbranches) == 1 and len(newheadbranches) == 1 \ | |
|
195 | and stripbranches == newheadbranches: | |
|
196 | repo.destroyed(newheadnodes) | |
|
197 | else: | |
|
198 | # Multiple branches involved in strip. Will allow branchcache to become | |
|
199 | # invalid and later on rebuilt from scratch | |
|
200 | repo.destroyed() | |
|
183 | repo.destroyed() |
@@ -257,11 +257,14 b' class revlog(object):' | |||
|
257 | 257 | return iter(xrange(len(self))) |
|
258 | 258 | def revs(self, start=0, stop=None): |
|
259 | 259 | """iterate over all rev in this revlog (from start to stop)""" |
|
260 | if stop is None: | |
|
261 | stop = len(self) | |
|
260 | step = 1 | |
|
261 | if stop is not None: | |
|
262 | if start > stop: | |
|
263 | step = -1 | |
|
264 | stop += step | |
|
262 | 265 | else: |
|
263 | stop += 1 | |
|
264 | return xrange(start, stop) | |
|
266 | stop = len(self) | |
|
267 | return xrange(start, stop, step) | |
|
265 | 268 | |
|
266 | 269 | @util.propertycache |
|
267 | 270 | def nodemap(self): |
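The rewritten revs() iterates forward when start <= stop and backward when start > stop, with stop inclusive either way; nudging stop by step converts the inclusive endpoint into xrange's exclusive bound. The same arithmetic detached from revlog, as a sketch (Python 2, like the surrounding code):

    def revs(nrevs, start=0, stop=None):
        step = 1
        if stop is not None:
            if start > stop:
                step = -1
            stop += step           # inclusive endpoint -> exclusive bound
        else:
            stop = nrevs
        return xrange(start, stop, step)

    assert list(revs(5, 1, 3)) == [1, 2, 3]
    assert list(revs(5, 3, 1)) == [3, 2, 1]   # reversed iteration
    assert list(revs(5)) == [0, 1, 2, 3, 4]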
@@ -338,33 +341,14 b' class revlog(object):' | |||
|
338 | 341 | return len(t) |
|
339 | 342 | size = rawsize |
|
340 | 343 | |
|
341 | def ancestors(self, revs, stoprev=0): | |
|
344 | def ancestors(self, revs, stoprev=0, inclusive=False): | |
|
342 | 345 | """Generate the ancestors of 'revs' in reverse topological order. |
|
343 | 346 | Does not generate revs lower than stoprev. |
|
344 | 347 | |
|
345 | Yield a sequence of revision numbers starting with the parents | |
|
346 | of each revision in revs, i.e., each revision is *not* considered | |
|
347 | an ancestor of itself. Results are in breadth-first order: | |
|
348 | parents of each rev in revs, then parents of those, etc. Result | |
|
349 | does not include the null revision.""" | |
|
350 | visit = util.deque(revs) | |
|
351 | seen = set([nullrev]) | |
|
352 | while visit: | |
|
353 | for parent in self.parentrevs(visit.popleft()): | |
|
354 | if parent < stoprev: | |
|
355 | continue | |
|
356 | if parent not in seen: | |
|
357 | visit.append(parent) | |
|
358 | seen.add(parent) | |
|
359 | yield parent | |
|
348 | See the documentation for ancestor.lazyancestors for more details.""" | |
|
360 | 349 | |
|
361 | def incancestors(self, revs, stoprev=0): | |
|
362 | """Identical to ancestors() except it also generates the | |
|
363 | revisions, 'revs'""" | |
|
364 | for rev in revs: | |
|
365 | yield rev | |
|
366 | for rev in self.ancestors(revs, stoprev): | |
|
367 | yield rev | |
|
350 | return ancestor.lazyancestors(self, revs, stoprev=stoprev, | |
|
351 | inclusive=inclusive) | |
|
368 | 352 | |
|
369 | 353 | def descendants(self, revs): |
|
370 | 354 | """Generate the descendants of 'revs' in revision order. |
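The breadth-first loop removed here now lives in ancestor.lazyancestors, and the removed incancestors() survives as the inclusive=True flag. A minimal generator equivalent of both behaviours, sketched against a plain parentrevs callable instead of the lazyancestors class:

    import collections

    nullrev = -1

    def iterancestors(parentrevs, revs, stoprev=0, inclusive=False):
        # with inclusive=True, revs themselves come first, which is what
        # the removed incancestors() used to provide
        if inclusive:
            for rev in revs:
                yield rev
        visit = collections.deque(revs)
        seen = set([nullrev])
        while visit:
            for parent in parentrevs(visit.popleft()):
                if parent < stoprev:
                    continue
                if parent not in seen:
                    visit.append(parent)
                    seen.add(parent)
                    yield parent

    parents = {0: (nullrev, nullrev), 1: (0, nullrev), 2: (1, 0)}
    pr = lambda r: parents[r]
    assert list(iterancestors(pr, [2])) == [1, 0]
    assert list(iterancestors(pr, [2], inclusive=True)) == [2, 1, 0]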
@@ -429,6 +413,29 b' class revlog(object):' | |||
|
429 | 413 | missing.sort() |
|
430 | 414 | return has, [self.node(r) for r in missing] |
|
431 | 415 | |
|
416 | def findmissingrevs(self, common=None, heads=None): | |
|
417 | """Return the revision numbers of the ancestors of heads that | |
|
418 | are not ancestors of common. | |
|
419 | ||
|
420 | More specifically, return a list of revision numbers corresponding to | |
|
421 | nodes N such that every N satisfies the following constraints: | |
|
422 | ||
|
423 | 1. N is an ancestor of some node in 'heads' | |
|
424 | 2. N is not an ancestor of any node in 'common' | |
|
425 | ||
|
426 | The list is sorted by revision number, meaning it is | |
|
427 | topologically sorted. | |
|
428 | ||
|
429 | 'heads' and 'common' are both lists of revision numbers. If heads is | |
|
430 | not supplied, uses all of the revlog's heads. If common is not | |
|
431 | supplied, uses nullid.""" | |
|
432 | if common is None: | |
|
433 | common = [nullrev] | |
|
434 | if heads is None: | |
|
435 | heads = self.headrevs() | |
|
436 | ||
|
437 | return ancestor.missingancestors(heads, common, self.parentrevs) | |
|
438 | ||
|
432 | 439 | def findmissing(self, common=None, heads=None): |
|
433 | 440 | """Return the ancestors of heads that are not ancestors of common. |
|
434 | 441 | |
@@ -444,8 +451,16 b' class revlog(object):' | |||
|
444 | 451 | 'heads' and 'common' are both lists of node IDs. If heads is |
|
445 | 452 | not supplied, uses all of the revlog's heads. If common is not |
|
446 | 453 | supplied, uses nullid.""" |
|
447 | _common, missing = self.findcommonmissing(common, heads) | |
|
448 | return missing | |
|
454 | if common is None: | |
|
455 | common = [nullid] | |
|
456 | if heads is None: | |
|
457 | heads = self.heads() | |
|
458 | ||
|
459 | common = [self.rev(n) for n in common] | |
|
460 | heads = [self.rev(n) for n in heads] | |
|
461 | ||
|
462 | return [self.node(r) for r in | |
|
463 | ancestor.missingancestors(heads, common, self.parentrevs)] | |
|
449 | 464 | |
|
450 | 465 | def nodesbetween(self, roots=None, heads=None): |
|
451 | 466 | """Return a topological path from 'roots' to 'heads'. |
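findmissingrevs() and the reworked findmissing() above both reduce to ancestor.missingancestors, i.e. "::heads - ::common" expressed over revision numbers. A reference implementation of that contract with plain sets — deliberately naive, since the real routine avoids materializing both ancestor sets:

    def reachable(parentrevs, roots):
        seen, stack = set(), list(roots)
        while stack:
            r = stack.pop()
            if r >= 0 and r not in seen:
                seen.add(r)
                stack.extend(parentrevs(r))
        return seen

    def missingancestors(heads, common, parentrevs):
        # ::heads - ::common, sorted; revlog numbering makes this topological
        return sorted(reachable(parentrevs, heads) -
                      reachable(parentrevs, common))

    parents = {0: (-1,), 1: (0,), 2: (1,), 3: (1,)}
    pr = lambda r: parents[r]
    assert missingancestors([3], [2], pr) == [3]
    assert missingancestors([2, 3], [0], pr) == [1, 2, 3]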
@@ -13,6 +13,7 b' import match as matchmod' | |||
|
13 | 13 | from i18n import _ |
|
14 | 14 | import encoding |
|
15 | 15 | import obsolete as obsmod |
|
16 | import repoview | |
|
16 | 17 | |
|
17 | 18 | def _revancestors(repo, revs, followfirst): |
|
18 | 19 | """Like revlog.ancestors(), but supports followfirst.""" |
@@ -442,6 +443,18 b' def bumped(repo, subset, x):' | |||
|
442 | 443 | bumped = obsmod.getrevs(repo, 'bumped') |
|
443 | 444 | return [r for r in subset if r in bumped] |
|
444 | 445 | |
|
446 | def bundle(repo, subset, x): | |
|
447 | """``bundle()`` | |
|
448 | Changesets in the bundle. | |
|
449 | ||
|
450 | Bundle must be specified by the -R option.""" | |
|
451 | ||
|
452 | try: | |
|
453 | bundlerevs = repo.changelog.bundlerevs | |
|
454 | except AttributeError: | |
|
455 | raise util.Abort(_("no bundle provided - specify with -R")) | |
|
456 | return [r for r in subset if r in bundlerevs] | |
|
457 | ||
|
445 | 458 | def checkstatus(repo, subset, pat, field): |
|
446 | 459 | m = None |
|
447 | 460 | s = [] |
@@ -475,8 +488,13 b' def checkstatus(repo, subset, pat, field' | |||
|
475 | 488 | |
|
476 | 489 | def _children(repo, narrow, parentset): |
|
477 | 490 | cs = set() |
|
491 | if not parentset: | |
|
492 | return cs | |
|
478 | 493 | pr = repo.changelog.parentrevs |
|
494 | minrev = min(parentset) | |
|
479 | 495 | for r in narrow: |
|
496 | if r <= minrev: | |
|
497 | continue | |
|
480 | 498 | for p in pr(r): |
|
481 | 499 | if p in parentset: |
|
482 | 500 | cs.add(r) |
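The new guards in _children() are pure pruning: an empty parentset can never produce children (and would crash min()), and since a revlog child always has a higher revision number than its parents, any r <= min(parentset) can be skipped before paying for the parentrevs() call. The pruned loop in isolation, as a sketch:

    def children(parentrevs, narrow, parentset):
        cs = set()
        if not parentset:
            return cs              # also avoids min() on an empty set
        minrev = min(parentset)
        for r in narrow:
            if r <= minrev:        # children always have rev > their parents
                continue
            if any(p in parentset for p in parentrevs(r)):
                cs.add(r)
        return cs

    parents = {0: (-1,), 1: (0,), 2: (1,), 3: (1,)}
    assert children(lambda r: parents[r], [0, 1, 2, 3],
                    set([1])) == set([2, 3])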
@@ -628,6 +646,15 b' def destination(repo, subset, x):' | |||
|
628 | 646 | |
|
629 | 647 | return [r for r in subset if r in dests] |
|
630 | 648 | |
|
649 | def divergent(repo, subset, x): | |
|
650 | """``divergent()`` | |
|
651 | Final successors of changesets with an alternative set of final successors. | |
|
652 | """ | |
|
653 | # i18n: "divergent" is a keyword | |
|
654 | getargs(x, 0, 0, _("divergent takes no arguments")) | |
|
655 | divergent = obsmod.getrevs(repo, 'divergent') | |
|
656 | return [r for r in subset if r in divergent] | |
|
657 | ||
|
631 | 658 | def draft(repo, subset, x): |
|
632 | 659 | """``draft()`` |
|
633 | 660 | Changeset in draft phase.""" |
@@ -865,7 +892,8 b' def hidden(repo, subset, x):' | |||
|
865 | 892 | """ |
|
866 | 893 | # i18n: "hidden" is a keyword |
|
867 | 894 | getargs(x, 0, 0, _("hidden takes no arguments")) |
|
868 | return [r for r in subset if r in repo.hiddenrevs] | |
|
895 | hiddenrevs = repoview.filterrevs(repo, 'visible') | |
|
896 | return [r for r in subset if r in hiddenrevs] | |
|
869 | 897 | |
|
870 | 898 | def keyword(repo, subset, x): |
|
871 | 899 | """``keyword(string)`` |
@@ -1513,6 +1541,7 b' symbols = {' | |||
|
1513 | 1541 | "branch": branch, |
|
1514 | 1542 | "branchpoint": branchpoint, |
|
1515 | 1543 | "bumped": bumped, |
|
1544 | "bundle": bundle, | |
|
1516 | 1545 | "children": children, |
|
1517 | 1546 | "closed": closed, |
|
1518 | 1547 | "contains": contains, |
@@ -1522,6 +1551,7 b' symbols = {' | |||
|
1522 | 1551 | "descendants": descendants, |
|
1523 | 1552 | "_firstdescendants": _firstdescendants, |
|
1524 | 1553 | "destination": destination, |
|
1554 | "divergent": divergent, | |
|
1525 | 1555 | "draft": draft, |
|
1526 | 1556 | "extinct": extinct, |
|
1527 | 1557 | "extra": extra, |
@@ -252,9 +252,9 b' class vfs(abstractvfs):' | |||
|
252 | 252 | def _setmustaudit(self, onoff): |
|
253 | 253 | self._audit = onoff |
|
254 | 254 | if onoff: |
|
255 | self.auditor = pathauditor(self.base) | |
|
255 | self.audit = pathauditor(self.base) | |
|
256 | 256 | else: |
|
257 | self.auditor = util.always | |
|
257 | self.audit = util.always | |
|
258 | 258 | |
|
259 | 259 | mustaudit = property(_getmustaudit, _setmustaudit) |
|
260 | 260 | |
@@ -276,51 +276,52 b' class vfs(abstractvfs):' | |||
|
276 | 276 | r = util.checkosfilename(path) |
|
277 | 277 | if r: |
|
278 | 278 | raise util.Abort("%s: %r" % (r, path)) |
|
279 | self.auditor(path) | |
|
279 | self.audit(path) | |
|
280 | 280 | f = self.join(path) |
|
281 | 281 | |
|
282 | 282 | if not text and "b" not in mode: |
|
283 | 283 | mode += "b" # for that other OS |
|
284 | 284 | |
|
285 | 285 | nlink = -1 |
|
286 | dirname, basename = util.split(f) | |
|
287 | # If basename is empty, then the path is malformed because it points | |
|
288 | # to a directory. Let the posixfile() call below raise IOError. | |
|
289 | if basename and mode not in ('r', 'rb'): | |
|
290 | if atomictemp: | |
|
291 | if not os.path.isdir(dirname): | |
|
292 | util.makedirs(dirname, self.createmode) | |
|
293 | return util.atomictempfile(f, mode, self.createmode) | |
|
294 | try: | |
|
295 | if 'w' in mode: | |
|
296 | util.unlink(f) | |
|
286 | if mode not in ('r', 'rb'): | |
|
287 | dirname, basename = util.split(f) | |
|
288 | # If basename is empty, then the path is malformed because it points | |
|
289 | # to a directory. Let the posixfile() call below raise IOError. | |
|
290 | if basename: | |
|
291 | if atomictemp: | |
|
292 | if not os.path.isdir(dirname): | |
|
293 | util.makedirs(dirname, self.createmode) | |
|
294 | return util.atomictempfile(f, mode, self.createmode) | |
|
295 | try: | |
|
296 | if 'w' in mode: | |
|
297 | util.unlink(f) | |
|
298 | nlink = 0 | |
|
299 | else: | |
|
300 | # nlinks() may behave differently for files on Windows | |
|
301 | # shares if the file is open. | |
|
302 | fd = util.posixfile(f) | |
|
303 | nlink = util.nlinks(f) | |
|
304 | if nlink < 1: | |
|
305 | nlink = 2 # force mktempcopy (issue1922) | |
|
306 | fd.close() | |
|
307 | except (OSError, IOError), e: | |
|
308 | if e.errno != errno.ENOENT: | |
|
309 | raise | |
|
297 | 310 | nlink = 0 |
|
298 | else: | |
|
299 | # nlinks() may behave differently for files on Windows | |
|
300 | # shares if the file is open. | |
|
301 | fd = util.posixfile(f) | |
|
302 | nlink = util.nlinks(f) | |
|
303 | if nlink < 1: | |
|
304 | nlink = 2 # force mktempcopy (issue1922) | |
|
305 | fd.close() | |
|
306 | except (OSError, IOError), e: | |
|
307 | if e.errno != errno.ENOENT: | |
|
308 | raise | |
|
309 | nlink = 0 | |
|
310 | if not os.path.isdir(dirname): | |
|
311 | util.makedirs(dirname, self.createmode) | |
|
312 | if nlink > 0: | |
|
313 | if self._trustnlink is None: | |
|
314 | self._trustnlink = nlink > 1 or util.checknlink(f) | |
|
315 | if nlink > 1 or not self._trustnlink: | |
|
316 | util.rename(util.mktempcopy(f), f) | |
|
311 | if not os.path.isdir(dirname): | |
|
312 | util.makedirs(dirname, self.createmode) | |
|
313 | if nlink > 0: | |
|
314 | if self._trustnlink is None: | |
|
315 | self._trustnlink = nlink > 1 or util.checknlink(f) | |
|
316 | if nlink > 1 or not self._trustnlink: | |
|
317 | util.rename(util.mktempcopy(f), f) | |
|
317 | 318 | fp = util.posixfile(f, mode) |
|
318 | 319 | if nlink == 0: |
|
319 | 320 | self._fixfilemode(f) |
|
320 | 321 | return fp |
|
321 | 322 | |
|
322 | 323 | def symlink(self, src, dst): |
|
323 | self.auditor(dst) | |
|
324 | self.audit(dst) | |
|
324 | 325 | linkname = self.join(dst) |
|
325 | 326 | try: |
|
326 | 327 | os.unlink(linkname) |
@@ -340,9 +341,6 b' class vfs(abstractvfs):' | |||
|
340 | 341 | else: |
|
341 | 342 | self.write(dst, src) |
|
342 | 343 | |
|
343 | def audit(self, path): | |
|
344 | self.auditor(path) | |
|
345 | ||
|
346 | 344 | def join(self, path): |
|
347 | 345 | if path: |
|
348 | 346 | return os.path.join(self.base, path) |
@@ -381,6 +379,18 b' class filtervfs(abstractvfs, auditvfs):' | |||
|
381 | 379 | |
|
382 | 380 | filteropener = filtervfs |
|
383 | 381 | |
|
382 | class readonlyvfs(abstractvfs, auditvfs): | |
|
383 | '''Wrapper vfs preventing any writing.''' | |
|
384 | ||
|
385 | def __init__(self, vfs): | |
|
386 | auditvfs.__init__(self, vfs) | |
|
387 | ||
|
388 | def __call__(self, path, mode='r', *args, **kw): | |
|
389 | if mode not in ('r', 'rb'): | |
|
390 | raise util.Abort('this vfs is read only') | |
|
391 | return self.vfs(path, mode, *args, **kw) | |
|
392 | ||
|
393 | ||
|
384 | 394 | def canonpath(root, cwd, myname, auditor=None): |
|
385 | 395 | '''return the canonical path of myname, given cwd and root''' |
|
386 | 396 | if util.endswithsep(root): |
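readonlyvfs needs to police only one choke point: every write through a vfs goes via __call__ with a writable mode, so refusing anything but 'r'/'rb' there is sufficient. A stand-alone sketch of the guard, with the builtin open() standing in for the wrapped vfs and IOError in place of util.Abort:

    class readonlywrapper(object):
        def __init__(self, vfs):
            self.vfs = vfs
        def __call__(self, path, mode='r', *args, **kw):
            if mode not in ('r', 'rb'):
                raise IOError('this vfs is read only')
            return self.vfs(path, mode, *args, **kw)

    ro = readonlywrapper(open)
    try:
        ro('/tmp/example', 'w')    # refused before the real open() runs
    except IOError:
        pass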
@@ -425,7 +435,7 b' def canonpath(root, cwd, myname, auditor' | |||
|
425 | 435 | break |
|
426 | 436 | name = dirname |
|
427 | 437 | |
|
428 | raise util.Abort('%s not under root' % myname) | |
|
438 | raise util.Abort(_("%s not under root '%s'") % (myname, root)) | |
|
429 | 439 | |
|
430 | 440 | def walkrepos(path, followsym=False, seen_dirs=None, recurse=False): |
|
431 | 441 | '''yield every hg repository under path, always recursively. |
@@ -637,13 +647,13 b' def revrange(repo, revs):' | |||
|
637 | 647 | start, end = spec.split(_revrangesep, 1) |
|
638 | 648 | start = revfix(repo, start, 0) |
|
639 | 649 | end = revfix(repo, end, len(repo) - 1) |
|
640 | step = start > end and -1 or 1 | |
|
650 | rangeiter = repo.changelog.revs(start, end) | |
|
641 | 651 | if not seen and not l: |
|
642 | 652 | # by far the most common case: revs = ["-1:0"] |
|
643 | l = range(start, end + step, step) | |
|
653 | l = list(rangeiter) | |
|
644 | 654 | # defer syncing seen until next iteration |
|
645 | 655 | continue |
|
646 | newrevs = set(xrange(start, end + step, step)) | |
|
656 | newrevs = set(rangeiter) | |
|
647 | 657 | if seen: |
|
648 | 658 | newrevs.difference_update(seen) |
|
649 | 659 | seen.update(newrevs) |
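revrange() no longer computes a step by hand because changelog.revs(start, end) (see the revlog change above) already yields the inclusive range in the right direction. The shape of what the iterator hands back, as a small sketch:

    def rangerevs(start, end):
        # inclusive range in either direction, like changelog.revs(start, end)
        step = start <= end and 1 or -1
        return xrange(start, end + step, step)

    assert list(rangerevs(9, 7)) == [9, 8, 7]    # the common "-1:0" shape
    assert set(rangerevs(2, 5)) == set([2, 3, 4, 5])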
@@ -850,15 +860,19 b' def readrequires(opener, supported):' | |||
|
850 | 860 | return requirements |
|
851 | 861 | |
|
852 | 862 | class filecacheentry(object): |
|
853 | def __init__(self, path): | |
|
863 | def __init__(self, path, stat=True): | |
|
854 | 864 | self.path = path |
|
855 | self.cachestat = filecacheentry.stat(self.path) | |
|
865 | self.cachestat = None | |
|
866 | self._cacheable = None | |
|
856 | 867 | |
|
857 | if self.cachestat: | |
|
858 | self._cacheable = self.cachestat.cacheable() | |
|
859 | else: | |
|
860 | # None means we don't know yet | |
|
861 | self._cacheable = None | |
|
868 | if stat: | |
|
869 | self.cachestat = filecacheentry.stat(self.path) | |
|
870 | ||
|
871 | if self.cachestat: | |
|
872 | self._cacheable = self.cachestat.cacheable() | |
|
873 | else: | |
|
874 | # None means we don't know yet | |
|
875 | self._cacheable = None | |
|
862 | 876 | |
|
863 | 877 | def refresh(self): |
|
864 | 878 | if self.cacheable(): |
@@ -933,6 +947,7 b' class filecache(object):' | |||
|
933 | 947 | def __get__(self, obj, type=None): |
|
934 | 948 | # do we need to check if the file changed? |
|
935 | 949 | if self.name in obj.__dict__: |
|
950 | assert self.name in obj._filecache, self.name | |
|
936 | 951 | return obj.__dict__[self.name] |
|
937 | 952 | |
|
938 | 953 | entry = obj._filecache.get(self.name) |
@@ -954,12 +969,19 b' class filecache(object):' | |||
|
954 | 969 | return entry.obj |
|
955 | 970 | |
|
956 | 971 | def __set__(self, obj, value): |
|
957 | if self.name in obj._filecache: | |
|
958 | obj._filecache[self.name].obj = value # update cached copy | |
|
972 | if self.name not in obj._filecache: | |
|
973 | # we add an entry for the missing value because X in __dict__ | |
|
974 | # implies X in _filecache | |
|
975 | ce = filecacheentry(self.join(obj, self.path), False) | |
|
976 | obj._filecache[self.name] = ce | |
|
977 | else: | |
|
978 | ce = obj._filecache[self.name] | |
|
979 | ||
|
980 | ce.obj = value # update cached copy | |
|
959 | 981 | obj.__dict__[self.name] = value # update copy returned by obj.x |
|
960 | 982 | |
|
961 | 983 | def __delete__(self, obj): |
|
962 | 984 | try: |
|
963 | 985 | del obj.__dict__[self.name] |
|
964 | 986 | except KeyError: |
|
965 | raise AttributeError, self.name | |
|
987 | raise AttributeError(self.name) |
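The rewritten __set__ upholds the invariant that the new assertion in __get__ checks: a name present in obj.__dict__ always has a matching _filecache entry, even when the attribute is assigned before ever being read; the placeholder entry is built with stat=False so no stat is recorded yet. A toy model of just that bookkeeping, with stub classes invented for the example:

    class entrystub(object):
        def __init__(self, path, stat=True):   # stat ignored in this stub
            self.path, self.obj = path, None

    class filecachestub(object):
        def __init__(self, name, path):
            self.name, self.path = name, path
        def set(self, obj, value):
            if self.name not in obj._filecache:
                # X in __dict__ must imply X in _filecache
                obj._filecache[self.name] = entrystub(self.path, stat=False)
            obj._filecache[self.name].obj = value   # update cached copy
            obj.__dict__[self.name] = value         # copy returned by obj.x

    class repostub(object):
        def __init__(self):
            self._filecache = {}

    r = repostub()
    filecachestub('bookmarks', 'bookmarks').set(r, {})
    assert 'bookmarks' in r.__dict__ and 'bookmarks' in r._filecache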
@@ -134,8 +134,7 b' class statichttprepository(localrepo.loc' | |||
|
134 | 134 | self.changelog = changelog.changelog(self.sopener) |
|
135 | 135 | self._tags = None |
|
136 | 136 | self.nodetagscache = None |
|
137 | self._branchcache = None | |
|
138 | self._branchcachetip = None | |
|
137 | self._branchcaches = {} | |
|
139 | 138 | self.encodepats = None |
|
140 | 139 | self.decodepats = None |
|
141 | 140 |
@@ -76,7 +76,7 b' def _buildencodefun():' | |||
|
76 | 76 | cmap = dict([(chr(x), chr(x)) for x in xrange(127)]) |
|
77 | 77 | for x in (range(32) + range(126, 256) + winreserved): |
|
78 | 78 | cmap[chr(x)] = "~%02x" % x |
|
79 | for x in range(ord("A"), ord("Z")+1) + [ord(e)]: | |
|
79 | for x in range(ord("A"), ord("Z") + 1) + [ord(e)]: | |
|
80 | 80 | cmap[chr(x)] = e + chr(x).lower() |
|
81 | 81 | dmap = {} |
|
82 | 82 | for k, v in cmap.iteritems(): |
@@ -128,11 +128,11 b' def _buildlowerencodefun():' | |||
|
128 | 128 | cmap = dict([(chr(x), chr(x)) for x in xrange(127)]) |
|
129 | 129 | for x in (range(32) + range(126, 256) + winreserved): |
|
130 | 130 | cmap[chr(x)] = "~%02x" % x |
|
131 | for x in range(ord("A"), ord("Z")+1): | |
|
131 | for x in range(ord("A"), ord("Z") + 1): | |
|
132 | 132 | cmap[chr(x)] = chr(x).lower() |
|
133 | 133 | return lambda s: "".join([cmap[c] for c in s]) |
|
134 | 134 | |
|
135 | lowerencode = _buildlowerencodefun() | |
|
135 | lowerencode = getattr(parsers, 'lowerencode', None) or _buildlowerencodefun() | |
|
136 | 136 | |
|
137 | 137 | # Windows reserved names: con, prn, aux, nul, com1..com9, lpt1..lpt9 |
|
138 | 138 | _winres3 = ('aux', 'con', 'prn', 'nul') # length 3 |
@@ -255,22 +255,17 b' def _hybridencode(path, dotencode):' | |||
|
255 | 255 | return res |
|
256 | 256 | |
|
257 | 257 | def _pathencode(path): |
|
258 | de = encodedir(path) | |
|
258 | 259 | if len(path) > _maxstorepathlen: |
|
259 | return None | |
|
260 | ef = _encodefname(encodedir(path)).split('/') | |
|
260 | return _hashencode(de, True) | |
|
261 | ef = _encodefname(de).split('/') | |
|
261 | 262 | res = '/'.join(_auxencode(ef, True)) |
|
262 | 263 | if len(res) > _maxstorepathlen: |
|
263 | return None | |
|
264 | return _hashencode(de, True) | |
|
264 | 265 | return res |
|
265 | 266 | |
|
266 | 267 | _pathencode = getattr(parsers, 'pathencode', _pathencode) |
|
267 | 268 | |
|
268 | def _dothybridencode(f): | |
|
269 | ef = _pathencode(f) | |
|
270 | if ef is None: | |
|
271 | return _hashencode(encodedir(f), True) | |
|
272 | return ef | |
|
273 | ||
|
274 | 269 | def _plainhybridencode(f): |
|
275 | 270 | return _hybridencode(f, False) |
|
276 | 271 | |
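lowerencode and _pathencode now share one idiom: take the C implementation from the parsers extension when the build provides it, otherwise keep the pure-Python fallback; folding the _hashencode fallback into _pathencode itself is what lets the old _dothybridencode wrapper disappear. The selection idiom in isolation (fakeparsers stands in for mercurial.parsers here):

    class fakeparsers(object):
        pass   # pretend the C extension was built without pathencode

    def _pathencode(path):
        return path.lower()   # stand-in for the pure-Python encoder

    _pathencode = getattr(fakeparsers, 'pathencode', _pathencode)
    assert _pathencode('FOO') == 'foo'   # fell back to the Python version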
@@ -456,7 +451,7 b' class _fncachevfs(scmutil.abstractvfs, s' | |||
|
456 | 451 | class fncachestore(basicstore): |
|
457 | 452 | def __init__(self, path, vfstype, dotencode): |
|
458 | 453 | if dotencode: |
|
459 | encode = _dothybridencode | |
|
454 | encode = _pathencode | |
|
460 | 455 | else: |
|
461 | 456 | encode = _plainhybridencode |
|
462 | 457 | self.encode = encode |
@@ -14,6 +14,27 b' propertycache = util.propertycache' | |||
|
14 | 14 | |
|
15 | 15 | nullstate = ('', '', 'empty') |
|
16 | 16 | |
|
17 | class SubrepoAbort(error.Abort): | |
|
18 | """Exception class used to avoid handling a subrepo error more than once""" | |
|
19 | def __init__(self, *args, **kw): | |
|
20 | error.Abort.__init__(self, *args, **kw) | |
|
21 | self.subrepo = kw.get('subrepo') | |
|
22 | ||
|
23 | def annotatesubrepoerror(func): | |
|
24 | def decoratedmethod(self, *args, **kargs): | |
|
25 | try: | |
|
26 | res = func(self, *args, **kargs) | |
|
27 | except SubrepoAbort, ex: | |
|
28 | # This exception has already been handled | |
|
29 | raise ex | |
|
30 | except error.Abort, ex: | |
|
31 | subrepo = subrelpath(self) | |
|
32 | errormsg = str(ex) + ' ' + _('(in subrepo %s)') % subrepo | |
|
33 | # avoid handling this exception by raising a SubrepoAbort exception | |
|
34 | raise SubrepoAbort(errormsg, hint=ex.hint, subrepo=subrepo) | |
|
35 | return res | |
|
36 | return decoratedmethod | |
|
37 | ||
|
17 | 38 | def state(ctx, ui): |
|
18 | 39 | """return a state dict, mapping subrepo paths configured in .hgsub |
|
19 | 40 | to tuple: (source from .hgsub, revision from .hgsubstate, kind |
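The decorator added above is built so a subrepo failure is annotated exactly once: a plain error.Abort gets '(in subrepo ...)' appended and is re-raised as SubrepoAbort, and the first except clause lets an already-converted error cross any number of decorated frames untouched. A stripped-down sketch of that single-annotation trick (stub exception classes and a fixed subrepo name, invented for the example):

    class Abort(Exception):
        pass

    class SubrepoAbort(Abort):
        pass

    def annotate(func):
        def wrapped(*args):
            try:
                return func(*args)
            except SubrepoAbort:      # already annotated, pass through
                raise
            except Abort, ex:         # the real code uses subrelpath(self)
                raise SubrepoAbort(str(ex) + ' (in subrepo sub)')
        return wrapped

    @annotate
    @annotate                         # nested subrepo calls stack decorators
    def fail():
        raise Abort('boom')

    try:
        fail()
    except SubrepoAbort, ex:
        assert str(ex).count('(in subrepo') == 1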
@@ -126,7 +147,7 b' def submerge(repo, wctx, mctx, actx, ove' | |||
|
126 | 147 | r = "%s:%s:%s" % r |
|
127 | 148 | repo.ui.debug(" subrepo %s: %s %s\n" % (s, msg, r)) |
|
128 | 149 | |
|
129 | for s, l in s1.items(): | |
|
150 | for s, l in sorted(s1.iteritems()): | |
|
130 | 151 | a = sa.get(s, nullstate) |
|
131 | 152 | ld = l # local state with possible dirty flag for compares |
|
132 | 153 | if wctx.sub(s).dirty(): |
@@ -244,8 +265,7 b' def _abssource(repo, push=False, abort=T' | |||
|
244 | 265 | if repo.ui.config('paths', 'default'): |
|
245 | 266 | return repo.ui.config('paths', 'default') |
|
246 | 267 | if abort: |
|
247 | raise util.Abort(_("default path for subrepository %s not found") % | |
|
248 | reporelpath(repo)) | |
|
268 | raise util.Abort(_("default path for subrepository not found")) | |
|
249 | 269 | |
|
250 | 270 | def itersubrepos(ctx1, ctx2): |
|
251 | 271 | """find subrepos in ctx1 or ctx2""" |
@@ -402,6 +422,7 b' class hgsubrepo(abstractsubrepo):' | |||
|
402 | 422 | self._repo.ui.setconfig(s, k, v) |
|
403 | 423 | self._initrepo(r, state[0], create) |
|
404 | 424 | |
|
425 | @annotatesubrepoerror | |
|
405 | 426 | def _initrepo(self, parentrepo, source, create): |
|
406 | 427 | self._repo._subparent = parentrepo |
|
407 | 428 | self._repo._subsource = source |
@@ -422,10 +443,12 b' class hgsubrepo(abstractsubrepo):' | |||
|
422 | 443 | addpathconfig('default-push', defpushpath) |
|
423 | 444 | fp.close() |
|
424 | 445 | |
|
446 | @annotatesubrepoerror | |
|
425 | 447 | def add(self, ui, match, dryrun, listsubrepos, prefix, explicitonly): |
|
426 | 448 | return cmdutil.add(ui, self._repo, match, dryrun, listsubrepos, |
|
427 | 449 | os.path.join(prefix, self._path), explicitonly) |
|
428 | 450 | |
|
451 | @annotatesubrepoerror | |
|
429 | 452 | def status(self, rev2, **opts): |
|
430 | 453 | try: |
|
431 | 454 | rev1 = self._state[1] |
@@ -437,6 +460,7 b' class hgsubrepo(abstractsubrepo):' | |||
|
437 | 460 | % (inst, subrelpath(self))) |
|
438 | 461 | return [], [], [], [], [], [], [] |
|
439 | 462 | |
|
463 | @annotatesubrepoerror | |
|
440 | 464 | def diff(self, ui, diffopts, node2, match, prefix, **opts): |
|
441 | 465 | try: |
|
442 | 466 | node1 = node.bin(self._state[1]) |
@@ -446,12 +470,13 b' class hgsubrepo(abstractsubrepo):' | |||
|
446 | 470 | node2 = node.bin(node2) |
|
447 | 471 | cmdutil.diffordiffstat(ui, self._repo, diffopts, |
|
448 | 472 | node1, node2, match, |
|
449 | prefix=os.path.join(prefix, self._path), | |
|
473 | prefix=posixpath.join(prefix, self._path), | |
|
450 | 474 | listsubrepos=True, **opts) |
|
451 | 475 | except error.RepoLookupError, inst: |
|
452 | 476 | self._repo.ui.warn(_('warning: error "%s" in subrepository "%s"\n') |
|
453 | 477 | % (inst, subrelpath(self))) |
|
454 | 478 | |
|
479 | @annotatesubrepoerror | |
|
455 | 480 | def archive(self, ui, archiver, prefix, match=None): |
|
456 | 481 | self._get(self._state + ('hg',)) |
|
457 | 482 | abstractsubrepo.archive(self, ui, archiver, prefix, match) |
@@ -463,6 +488,7 b' class hgsubrepo(abstractsubrepo):' | |||
|
463 | 488 | submatch = matchmod.narrowmatcher(subpath, match) |
|
464 | 489 | s.archive(ui, archiver, os.path.join(prefix, self._path), submatch) |
|
465 | 490 | |
|
491 | @annotatesubrepoerror | |
|
466 | 492 | def dirty(self, ignoreupdate=False): |
|
467 | 493 | r = self._state[1] |
|
468 | 494 | if r == '' and not ignoreupdate: # no state recorded |
@@ -479,6 +505,7 b' class hgsubrepo(abstractsubrepo):' | |||
|
479 | 505 | def checknested(self, path): |
|
480 | 506 | return self._repo._checknested(self._repo.wjoin(path)) |
|
481 | 507 | |
|
508 | @annotatesubrepoerror | |
|
482 | 509 | def commit(self, text, user, date): |
|
483 | 510 | # don't bother committing in the subrepo if it's only been |
|
484 | 511 | # updated |
@@ -490,6 +517,7 b' class hgsubrepo(abstractsubrepo):' | |||
|
490 | 517 | return self._repo['.'].hex() # different version checked out |
|
491 | 518 | return node.hex(n) |
|
492 | 519 | |
|
520 | @annotatesubrepoerror | |
|
493 | 521 | def remove(self): |
|
494 | 522 | # we can't fully delete the repository as it may contain |
|
495 | 523 | # local-only history |
@@ -519,12 +547,14 b' class hgsubrepo(abstractsubrepo):' | |||
|
519 | 547 | bookmarks.updatefromremote(self._repo.ui, self._repo, other, |
|
520 | 548 | srcurl) |
|
521 | 549 | |
|
550 | @annotatesubrepoerror | |
|
522 | 551 | def get(self, state, overwrite=False): |
|
523 | 552 | self._get(state) |
|
524 | 553 | source, revision, kind = state |
|
525 | 554 | self._repo.ui.debug("getting subrepo %s\n" % self._path) |
|
526 | 555 | hg.updaterepo(self._repo, revision, overwrite) |
|
527 | 556 | |
|
557 | @annotatesubrepoerror | |
|
528 | 558 | def merge(self, state): |
|
529 | 559 | self._get(state) |
|
530 | 560 | cur = self._repo['.'] |
@@ -551,6 +581,7 b' class hgsubrepo(abstractsubrepo):' | |||
|
551 | 581 | else: |
|
552 | 582 | mergefunc() |
|
553 | 583 | |
|
584 | @annotatesubrepoerror | |
|
554 | 585 | def push(self, opts): |
|
555 | 586 | force = opts.get('force') |
|
556 | 587 | newbranch = opts.get('new_branch') |
@@ -569,12 +600,15 b' class hgsubrepo(abstractsubrepo):' | |||
|
569 | 600 | other = hg.peer(self._repo, {'ssh': ssh}, dsturl) |
|
570 | 601 | return self._repo.push(other, force, newbranch=newbranch) |
|
571 | 602 | |
|
603 | @annotatesubrepoerror | |
|
572 | 604 | def outgoing(self, ui, dest, opts): |
|
573 | 605 | return hg.outgoing(ui, self._repo, _abssource(self._repo, True), opts) |
|
574 | 606 | |
|
607 | @annotatesubrepoerror | |
|
575 | 608 | def incoming(self, ui, source, opts): |
|
576 | 609 | return hg.incoming(ui, self._repo, _abssource(self._repo, False), opts) |
|
577 | 610 | |
|
611 | @annotatesubrepoerror | |
|
578 | 612 | def files(self): |
|
579 | 613 | rev = self._state[1] |
|
580 | 614 | ctx = self._repo[rev] |
@@ -593,10 +627,12 b' class hgsubrepo(abstractsubrepo):' | |||
|
593 | 627 | ctx = self._repo[None] |
|
594 | 628 | return ctx.walk(match) |
|
595 | 629 | |
|
630 | @annotatesubrepoerror | |
|
596 | 631 | def forget(self, ui, match, prefix): |
|
597 | 632 | return cmdutil.forget(ui, self._repo, match, |
|
598 | 633 | os.path.join(prefix, self._path), True) |
|
599 | 634 | |
|
635 | @annotatesubrepoerror | |
|
600 | 636 | def revert(self, ui, substate, *pats, **opts): |
|
601 | 637 | # reverting a subrepo is a 2 step process: |
|
602 | 638 | # 1. if the no_backup is not set, revert all modified |
@@ -751,6 +787,7 b' class svnsubrepo(abstractsubrepo):' | |||
|
751 | 787 | pass |
|
752 | 788 | return rev |
|
753 | 789 | |
|
790 | @annotatesubrepoerror | |
|
754 | 791 | def commit(self, text, user, date): |
|
755 | 792 | # user and date are out of our hands since svn is centralized |
|
756 | 793 | changed, extchanged, missing = self._wcchanged() |
@@ -778,6 +815,7 b' class svnsubrepo(abstractsubrepo):' | |||
|
778 | 815 | self._ui.status(self._svncommand(['update', '-r', newrev])[0]) |
|
779 | 816 | return newrev |
|
780 | 817 | |
|
818 | @annotatesubrepoerror | |
|
781 | 819 | def remove(self): |
|
782 | 820 | if self.dirty(): |
|
783 | 821 | self._ui.warn(_('not removing repo %s because ' |
@@ -802,6 +840,7 b' class svnsubrepo(abstractsubrepo):' | |||
|
802 | 840 | except OSError: |
|
803 | 841 | pass |
|
804 | 842 | |
|
843 | @annotatesubrepoerror | |
|
805 | 844 | def get(self, state, overwrite=False): |
|
806 | 845 | if overwrite: |
|
807 | 846 | self._svncommand(['revert', '--recursive']) |
@@ -822,6 +861,7 b' class svnsubrepo(abstractsubrepo):' | |||
|
822 | 861 | raise util.Abort((status or err).splitlines()[-1]) |
|
823 | 862 | self._ui.status(status) |
|
824 | 863 | |
|
864 | @annotatesubrepoerror | |
|
825 | 865 | def merge(self, state): |
|
826 | 866 | old = self._state[1] |
|
827 | 867 | new = state[1] |
@@ -835,6 +875,7 b' class svnsubrepo(abstractsubrepo):' | |||
|
835 | 875 | # push is a no-op for SVN |
|
836 | 876 | return True |
|
837 | 877 | |
|
878 | @annotatesubrepoerror | |
|
838 | 879 | def files(self): |
|
839 | 880 | output = self._svncommand(['list', '--recursive', '--xml'])[0] |
|
840 | 881 | doc = xml.dom.minidom.parseString(output) |
@@ -1021,6 +1062,7 b' class gitsubrepo(abstractsubrepo):' | |||
|
1021 | 1062 | raise util.Abort(_("revision %s does not exist in subrepo %s\n") % |
|
1022 | 1063 | (revision, self._relpath)) |
|
1023 | 1064 | |
|
1065 | @annotatesubrepoerror | |
|
1024 | 1066 | def dirty(self, ignoreupdate=False): |
|
1025 | 1067 | if self._gitmissing(): |
|
1026 | 1068 | return self._state[1] != '' |
@@ -1037,6 +1079,7 b' class gitsubrepo(abstractsubrepo):' | |||
|
1037 | 1079 | def basestate(self): |
|
1038 | 1080 | return self._gitstate() |
|
1039 | 1081 | |
|
1082 | @annotatesubrepoerror | |
|
1040 | 1083 | def get(self, state, overwrite=False): |
|
1041 | 1084 | source, revision, kind = state |
|
1042 | 1085 | if not revision: |
@@ -1120,6 +1163,7 b' class gitsubrepo(abstractsubrepo):' | |||
|
1120 | 1163 | # a real merge would be required, just checkout the revision |
|
1121 | 1164 | rawcheckout() |
|
1122 | 1165 | |
|
1166 | @annotatesubrepoerror | |
|
1123 | 1167 | def commit(self, text, user, date): |
|
1124 | 1168 | if self._gitmissing(): |
|
1125 | 1169 | raise util.Abort(_("subrepo %s is missing") % self._relpath) |
@@ -1137,6 +1181,7 b' class gitsubrepo(abstractsubrepo):' | |||
|
1137 | 1181 | # circumstances |
|
1138 | 1182 | return self._gitstate() |
|
1139 | 1183 | |
|
1184 | @annotatesubrepoerror | |
|
1140 | 1185 | def merge(self, state): |
|
1141 | 1186 | source, revision, kind = state |
|
1142 | 1187 | self._fetch(source, revision) |
@@ -1159,6 +1204,7 b' class gitsubrepo(abstractsubrepo):' | |||
|
1159 | 1204 | else: |
|
1160 | 1205 | mergefunc() |
|
1161 | 1206 | |
|
1207 | @annotatesubrepoerror | |
|
1162 | 1208 | def push(self, opts): |
|
1163 | 1209 | force = opts.get('force') |
|
1164 | 1210 | |
@@ -1198,6 +1244,7 b' class gitsubrepo(abstractsubrepo):' | |||
|
1198 | 1244 | (self._relpath, self._state[1])) |
|
1199 | 1245 | return False |
|
1200 | 1246 | |
|
1247 | @annotatesubrepoerror | |
|
1201 | 1248 | def remove(self): |
|
1202 | 1249 | if self._gitmissing(): |
|
1203 | 1250 | return |
@@ -1247,6 +1294,7 b' class gitsubrepo(abstractsubrepo):' | |||
|
1247 | 1294 | ui.progress(_('archiving (%s)') % relpath, None) |
|
1248 | 1295 | |
|
1249 | 1296 | |
|
1297 | @annotatesubrepoerror | |
|
1250 | 1298 | def status(self, rev2, **opts): |
|
1251 | 1299 | rev1 = self._state[1] |
|
1252 | 1300 | if self._gitmissing() or not rev1: |
@@ -8,6 +8,7 b'' | |||
|
8 | 8 | from i18n import _ |
|
9 | 9 | import sys, os, re |
|
10 | 10 | import util, config, templatefilters, parser, error |
|
11 | import types | |
|
11 | 12 | |
|
12 | 13 | # template parsing |
|
13 | 14 | |
@@ -140,6 +141,10 b' def runsymbol(context, mapping, key):' | |||
|
140 | 141 | v = context._defaults.get(key, '') |
|
141 | 142 | if util.safehasattr(v, '__call__'): |
|
142 | 143 | return v(**mapping) |
|
144 | if isinstance(v, types.GeneratorType): | |
|
145 | v = list(v) | |
|
146 | mapping[key] = v | |
|
147 | return v | |
|
143 | 148 | return v |
|
144 | 149 | |
|
145 | 150 | def buildfilter(exp, context): |
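The new branch in runsymbol exists because a template keyword may be a generator, and a generator can be consumed only once; without listing it and writing the list back into the mapping, a keyword referenced twice in one template would come up empty the second time. The failure mode and the fix in miniature:

    import types

    mapping = {'files': (f for f in ['a', 'b'])}

    def lookup(mapping, key):
        v = mapping[key]
        if isinstance(v, types.GeneratorType):
            v = list(v)
            mapping[key] = v    # cache so later lookups see the same data
        return v

    assert lookup(mapping, 'files') == ['a', 'b']
    assert lookup(mapping, 'files') == ['a', 'b']   # [] without the caching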
@@ -179,6 +184,7 b' def runmap(context, mapping, data):' | |||
|
179 | 184 | for i in d: |
|
180 | 185 | if isinstance(i, dict): |
|
181 | 186 | lm.update(i) |
|
187 | lm['originalnode'] = mapping.get('node') | |
|
182 | 188 | yield runtemplate(context, lm, ctmpl) |
|
183 | 189 | else: |
|
184 | 190 | # v is not an iterable of dicts, this happens when 'key' |
@@ -259,6 +265,15 b' def ifeq(context, mapping, args):' | |||
|
259 | 265 | t = stringify(args[3][0](context, mapping, args[3][1])) |
|
260 | 266 | yield runtemplate(context, mapping, compiletemplate(t, context)) |
|
261 | 267 | |
|
268 | def label(context, mapping, args): | |
|
269 | if len(args) != 2: | |
|
270 | # i18n: "label" is a keyword | |
|
271 | raise error.ParseError(_("label expects two arguments")) | |
|
272 | ||
|
273 | # ignore args[0] (the label string) since this is supposed to be a no-op | |
|
274 | t = stringify(args[1][0](context, mapping, args[1][1])) | |
|
275 | yield runtemplate(context, mapping, compiletemplate(t, context)) | |
|
276 | ||
|
262 | 277 | methods = { |
|
263 | 278 | "string": lambda e, c: (runstring, e[1]), |
|
264 | 279 | "symbol": lambda e, c: (runsymbol, e[1]), |
@@ -274,6 +289,7 b' funcs = {' | |||
|
274 | 289 | "ifeq": ifeq, |
|
275 | 290 | "join": join, |
|
276 | 291 | "sub": sub, |
|
292 | "label": label, | |
|
277 | 293 | } |
|
278 | 294 | |
|
279 | 295 | # template engine |
@@ -10,4 +10,6 b' tags = tags.tmpl' | |||
|
10 | 10 | tagentry = tagentry.tmpl |
|
11 | 11 | bookmarks = bookmarks.tmpl |
|
12 | 12 | bookmarkentry = bookmarkentry.tmpl |
|
13 | branches = branches.tmpl | |
|
14 | branchentry = branchentry.tmpl | |
|
13 | 15 | error = error.tmpl |
@@ -223,3 +223,4 b' notfound = ../paper/notfound.tmpl' | |||
|
223 | 223 | error = ../paper/error.tmpl |
|
224 | 224 | urlparameter = '{separator}{name}={value|urlescape}' |
|
225 | 225 | hiddenformentry = '<input type="hidden" name="{name}" value="{value|escape}" />' |
|
226 | breadcrumb = '> <a href="{url}">{name}</a> ' |
@@ -8,7 +8,8 b'' | |||
|
8 | 8 | <body> |
|
9 | 9 | |
|
10 | 10 | <div class="page_header"> |
|
11 | <a href="{logourl}" title="Mercurial" style="float: right;">Mercurial</a> | |
|
11 | <a href="{logourl}" title="Mercurial" style="float: right;">Mercurial</a> | |
|
12 | <a href="/">Mercurial</a> {pathdef%breadcrumb} / bookmarks | |
|
12 | 13 | </div> |
|
13 | 14 | |
|
14 | 15 | <div class="page_nav"> |
@@ -1,14 +1,15 b'' | |||
|
1 | 1 | {header} |
|
2 | 2 | <title>{repo|escape}: Branches</title> |
|
3 | 3 | <link rel="alternate" type="application/atom+xml" |
|
4 | href="{url}atom-log" title="Atom feed for {repo|escape}"/> | |
|
4 | href="{url}atom-branches" title="Atom feed for {repo|escape}"/> | |
|
5 | 5 | <link rel="alternate" type="application/rss+xml" |
|
6 | href="{url}rss-log" title="RSS feed for {repo|escape}"/> | |
|
6 | href="{url}rss-branches" title="RSS feed for {repo|escape}"/> | |
|
7 | 7 | </head> |
|
8 | 8 | <body> |
|
9 | 9 | |
|
10 | 10 | <div class="page_header"> |
|
11 | <a href="{logourl}" title="Mercurial" style="float: right;">Mercurial</a> | |
|
11 | <a href="{logourl}" title="Mercurial" style="float: right;">Mercurial</a> | |
|
12 | <a href="/">Mercurial</a> {pathdef%breadcrumb} / branches | |
|
12 | 13 | </div> |
|
13 | 14 | |
|
14 | 15 | <div class="page_nav"> |
@@ -8,7 +8,8 b'' | |||
|
8 | 8 | <body> |
|
9 | 9 | |
|
10 | 10 | <div class="page_header"> |
|
11 | <a href="{logourl}" title="Mercurial" style="float: right;">Mercurial</a> | |
|
11 | <a href="{logourl}" title="Mercurial" style="float: right;">Mercurial</a> | |
|
12 | <a href="/">Mercurial</a> {pathdef%breadcrumb} / changelog | |
|
12 | 13 | </div> |
|
13 | 14 | |
|
14 | 15 | <form action="{url}log"> |
@@ -8,7 +8,8 b'' | |||
|
8 | 8 | <body> |
|
9 | 9 | |
|
10 | 10 | <div class="page_header"> |
|
11 | <a href="{logourl}" title="Mercurial" style="float: right;">Mercurial</a> | |
|
11 | <a href="{logourl}" title="Mercurial" style="float: right;">Mercurial</a> | |
|
12 | <a href="/">Mercurial</a> {pathdef%breadcrumb} / changeset | |
|
12 | 13 | </div> |
|
13 | 14 | |
|
14 | 15 | <div class="page_nav"> |
@@ -8,7 +8,8 b'' | |||
|
8 | 8 | <body> |
|
9 | 9 | |
|
10 | 10 | <div class="page_header"> |
|
11 | <a href="{logourl}" title="Mercurial" style="float: right;">Mercurial</a> | |
|
11 | <a href="{logourl}" title="Mercurial" style="float: right;">Mercurial</a> | |
|
12 | <a href="/">Mercurial</a> {pathdef%breadcrumb} / error | |
|
12 | 13 | </div> |
|
13 | 14 | |
|
14 | 15 | <div class="page_nav"> |
@@ -8,7 +8,8 b'' | |||
|
8 | 8 | <body> |
|
9 | 9 | |
|
10 | 10 | <div class="page_header"> |
|
11 | <a href="{logourl}" title="Mercurial" style="float: right;">Mercurial</a> | |
|
11 | <a href="{logourl}" title="Mercurial" style="float: right;">Mercurial</a> | |
|
12 | <a href="/">Mercurial</a> {pathdef%breadcrumb} / annotate | |
|
12 | 13 | </div> |
|
13 | 14 | |
|
14 | 15 | <div class="page_nav"> |
@@ -8,7 +8,8 b'' | |||
|
8 | 8 | <body> |
|
9 | 9 | |
|
10 | 10 | <div class="page_header"> |
|
11 | <a href="{logourl}" title="Mercurial" style="float: right;">Mercurial</a> | |
|
11 | <a href="{logourl}" title="Mercurial" style="float: right;">Mercurial</a> | |
|
12 | <a href="/">Mercurial</a> {pathdef%breadcrumb} / comparison | |
|
12 | 13 | </div> |
|
13 | 14 | |
|
14 | 15 | <div class="page_nav"> |
@@ -8,7 +8,8 b'' | |||
|
8 | 8 | <body> |
|
9 | 9 | |
|
10 | 10 | <div class="page_header"> |
|
11 | <a href="{logourl}" title="Mercurial" style="float: right;">Mercurial</a> | |
|
11 | <a href="{logourl}" title="Mercurial" style="float: right;">Mercurial</a> | |
|
12 | <a href="/">Mercurial</a> {pathdef%breadcrumb} / diff | |
|
12 | 13 | </div> |
|
13 | 14 | |
|
14 | 15 | <div class="page_nav"> |
@@ -8,7 +8,8 b'' | |||
|
8 | 8 | <body> |
|
9 | 9 | |
|
10 | 10 | <div class="page_header"> |
|
11 | <a href="{logourl}" title="Mercurial" style="float: right;">Mercurial</a> | |
|
11 | <a href="{logourl}" title="Mercurial" style="float: right;">Mercurial</a> | |
|
12 | <a href="/">Mercurial</a> {pathdef%breadcrumb} / file revisions | |
|
12 | 13 | </div> |
|
13 | 14 | |
|
14 | 15 | <div class="page_nav"> |
@@ -8,7 +8,8 b'' | |||
|
8 | 8 | <body> |
|
9 | 9 | |
|
10 | 10 | <div class="page_header"> |
|
11 | <a href="{logourl}" title="Mercurial" style="float: right;">Mercurial</a> | |
|
11 | <a href="{logourl}" title="Mercurial" style="float: right;">Mercurial</a> | |
|
12 | <a href="/">Mercurial</a> {pathdef%breadcrumb} / file revision | |
|
12 | 13 | </div> |
|
13 | 14 | |
|
14 | 15 | <div class="page_nav"> |
@@ -9,7 +9,8 b'' | |||
|
9 | 9 | <body> |
|
10 | 10 | |
|
11 | 11 | <div class="page_header"> |
|
12 | <a href="{logourl}" title="Mercurial" style="float: right;">Mercurial</a> | |
|
12 | <a href="{logourl}" title="Mercurial" style="float: right;">Mercurial</a> | |
|
13 | <a href="/">Mercurial</a> {pathdef%breadcrumb} / graph | |
|
13 | 14 | </div> |
|
14 | 15 | |
|
15 | 16 | <form action="{url}log"> |
@@ -8,7 +8,8 b'' | |||
|
8 | 8 | <body> |
|
9 | 9 | |
|
10 | 10 | <div class="page_header"> |
|
11 | <a href="{logourl}" title="Mercurial" style="float: right;">Mercurial</a> | |
|
11 | <a href="{logourl}" title="Mercurial" style="float: right;">Mercurial</a> | |
|
12 | <a href="/">Mercurial</a> {pathdef%breadcrumb} / help | |
|
12 | 13 | </div> |
|
13 | 14 | |
|
14 | 15 | <div class="page_nav"> |
@@ -8,7 +8,8 b'' | |||
|
8 | 8 | <body> |
|
9 | 9 | |
|
10 | 10 | <div class="page_header"> |
|
11 | <a href="{logourl}" title="Mercurial" style="float: right;">Mercurial</a> | |
|
11 | <a href="{logourl}" title="Mercurial" style="float: right;">Mercurial</a> | |
|
12 | <a href="/">Mercurial</a> {pathdef%breadcrumb} / help | |
|
12 | 13 | </div> |
|
13 | 14 | |
|
14 | 15 | <div class="page_nav"> |
@@ -5,7 +5,7 b'' | |||
|
5 | 5 | |
|
6 | 6 | <div class="page_header"> |
|
7 | 7 | <a href="{logourl}" title="Mercurial" style="float: right;">Mercurial</a> |
|
8 | Repositories list | |
|
8 | <a href="/">Mercurial</a> {pathdef%breadcrumb} | |
|
9 | 9 | </div> |
|
10 | 10 | |
|
11 | 11 | <table cellspacing="0"> |
@@ -8,7 +8,8 b'' | |||
|
8 | 8 | <body> |
|
9 | 9 | |
|
10 | 10 | <div class="page_header"> |
|
11 | <a href="{logourl}" title="Mercurial" style="float: right;">Mercurial</a> | |
|
11 | <a href="{logourl}" title="Mercurial" style="float: right;">Mercurial</a> | |
|
12 | <a href="/">Mercurial</a> {pathdef%breadcrumb} / files | |
|
12 | 13 | </div> |
|
13 | 14 | |
|
14 | 15 | <div class="page_nav"> |
@@ -294,9 +294,15 b" indexentry = '" | |||
|
294 | 294 | <td>{contact|obfuscate}</td> |
|
295 | 295 | <td class="age">{lastchange|rfc822date}</td> |
|
296 | 296 | <td class="indexlinks">{archives%indexarchiveentry}</td> |
|
297 | <td><div class="rss_logo"><a href="{url}rss-log">RSS</a> <a href="{url}atom-log">Atom</a></div></td> | |
|
297 | <td>{if(isdirectory, '', | |
|
298 | '<div class="rss_logo"> | |
|
299 | <a href="{url}rss-log">RSS</a> <a href="{url}atom-log">Atom</a> | |
|
300 | </div>' | |
|
301 | )} | |
|
302 | </td> | |
|
298 | 303 | </tr>\n' |
|
299 | 304 | indexarchiveentry = ' <a href="{url}archive/{node|short}{extension}">{type|escape}</a> ' |
|
300 | 305 | index = index.tmpl |
|
301 | 306 | urlparameter = '{separator}{name}={value|urlescape}' |
|
302 | 307 | hiddenformentry = '<input type="hidden" name="{name}" value="{value|escape}" />' |
|
308 | breadcrumb = '> <a href="{url}">{name}</a> ' |
@@ -8,7 +8,8 b'' | |||
|
8 | 8 | <body> |
|
9 | 9 | |
|
10 | 10 | <div class="page_header"> |
|
11 | <a href="{logourl}" title="Mercurial" style="float: right;">Mercurial</a> | |
|
11 | <a href="{logourl}" title="Mercurial" style="float: right;">Mercurial</a> | |
|
12 | <a href="/">Mercurial</a> {pathdef%breadcrumb} / search | |
|
12 | 13 | |
|
13 | 14 | <form action="{url}log"> |
|
14 | 15 | {sessionvars%hiddenformentry} |
@@ -8,7 +8,8 b'' | |||
|
8 | 8 | <body> |
|
9 | 9 | |
|
10 | 10 | <div class="page_header"> |
|
11 | <a href="{logourl}" title="Mercurial" style="float: right;">Mercurial</a> | |
|
11 | <a href="{logourl}" title="Mercurial" style="float: right;">Mercurial</a> | |
|
12 | <a href="/">Mercurial</a> {pathdef%breadcrumb} / shortlog | |
|
12 | 13 | </div> |
|
13 | 14 | |
|
14 | 15 | <form action="{url}log"> |
@@ -8,8 +8,8 b'' | |||
|
8 | 8 | <body> |
|
9 | 9 | |
|
10 | 10 | <div class="page_header"> |
|
11 | <a href="{logourl}" title="Mercurial" style="float: right;">Mercurial</a> | |
|
12 | ||
|
11 | <a href="{logourl}" title="Mercurial" style="float: right;">Mercurial</a> | |
|
12 | <a href="/">Mercurial</a> {pathdef%breadcrumb} / summary | |
|
13 | 13 | <form action="{url}log"> |
|
14 | 14 | {sessionvars%hiddenformentry} |
|
15 | 15 | <div class="search"> |
@@ -8,7 +8,8 b'' | |||
|
8 | 8 | <body> |
|
9 | 9 | |
|
10 | 10 | <div class="page_header"> |
|
11 | <a href="{logourl}" title="Mercurial" style="float: right;">Mercurial</a> | |
|
11 | <a href="{logourl}" title="Mercurial" style="float: right;">Mercurial</a> | |
|
12 | <a href="/">Mercurial</a> {pathdef%breadcrumb} / tags | |
|
12 | 13 | </div> |
|
13 | 14 | |
|
14 | 15 | <div class="page_nav"> |
@@ -7,7 +7,7 b'' | |||
|
7 | 7 | <body> |
|
8 | 8 | <div id="container"> |
|
9 | 9 | <div class="page-header"> |
|
10 | <h1><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / bookmarks</h1> | |
|
10 | <h1 class="breadcrumb"><a href="/">Mercurial</a> {pathdef%breadcrumb} / bookmarks</h1> | |
|
11 | 11 | |
|
12 | 12 | <form action="{url}log"> |
|
13 | 13 | {sessionvars%hiddenformentry} |
@@ -1,13 +1,13 b'' | |||
|
1 | 1 | {header} |
|
2 | 2 | <title>{repo|escape}: Branches</title> |
|
3 | <link rel="alternate" type="application/atom+xml" href="{url}atom-log" title="Atom feed for {repo|escape}"/> | |
|
4 | <link rel="alternate" type="application/rss+xml" href="{url}rss-log" title="RSS feed for {repo|escape}"/> | |
|
3 | <link rel="alternate" type="application/atom+xml" href="{url}atom-branches" title="Atom feed for {repo|escape}"/> | |
|
4 | <link rel="alternate" type="application/rss+xml" href="{url}rss-branches" title="RSS feed for {repo|escape}"/> | |
|
5 | 5 | </head> |
|
6 | 6 | |
|
7 | 7 | <body> |
|
8 | 8 | <div id="container"> |
|
9 | 9 | <div class="page-header"> |
|
10 | <h1><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / branches</h1> | |
|
10 | <h1 class="breadcrumb"><a href="/">Mercurial</a> {pathdef%breadcrumb} / branches</h1> | |
|
11 | 11 | |
|
12 | 12 | <form action="{url}log"> |
|
13 | 13 | {sessionvars%hiddenformentry} |
@@ -7,7 +7,7 b'' | |||
|
7 | 7 | <body> |
|
8 | 8 | <div id="container"> |
|
9 | 9 | <div class="page-header"> |
|
10 | <h1><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / changelog</h1> | |
|
10 | <h1 class="breadcrumb"><a href="/">Mercurial</a> {pathdef%breadcrumb} / changelog</h1> | |
|
11 | 11 | |
|
12 | 12 | <form action="{url}log"> |
|
13 | 13 | {sessionvars%hiddenformentry} |
@@ -7,7 +7,7 b'' | |||
|
7 | 7 | <body> |
|
8 | 8 | <div id="container"> |
|
9 | 9 | <div class="page-header"> |
|
10 | <h1><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / changeset</h1> | |
|
10 | <h1 class="breadcrumb"><a href="/">Mercurial</a> {pathdef%breadcrumb} / changeset</h1> | |
|
11 | 11 | |
|
12 | 12 | <form action="{url}log"> |
|
13 | 13 | {sessionvars%hiddenformentry} |
@@ -7,7 +7,7 b'' | |||
|
7 | 7 | <body> |
|
8 | 8 | <div id="container"> |
|
9 | 9 | <div class="page-header"> |
|
10 | <h1><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / not found: {repo|escape}</h1> | |
|
10 | <h1 class="breadcrumb"><a href="/">Mercurial</a> {pathdef%breadcrumb} / not found: {repo|escape}</h1> | |
|
11 | 11 | |
|
12 | 12 | <form action="{url}log"> |
|
13 | 13 | {sessionvars%hiddenformentry} |
@@ -7,7 +7,7 b'' | |||
|
7 | 7 | <body> |
|
8 | 8 | <div id="container"> |
|
9 | 9 | <div class="page-header"> |
|
10 | <h1><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / annotate</h1> | |
|
10 | <h1 class="breadcrumb"><a href="/">Mercurial</a> {pathdef%breadcrumb} / annotate</h1> | |
|
11 | 11 | |
|
12 | 12 | <form action="{url}log"> |
|
13 | 13 | {sessionvars%hiddenformentry} |
@@ -7,7 +7,7 b'' | |||
|
7 | 7 | <body> |
|
8 | 8 | <div id="container"> |
|
9 | 9 | <div class="page-header"> |
|
10 | <h1><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / file comparison</h1> | |
|
10 | <h1 class="breadcrumb"><a href="/">Mercurial</a> {pathdef%breadcrumb} / file comparison</h1> | |
|
11 | 11 | |
|
12 | 12 | <form action="{url}log"> |
|
13 | 13 | {sessionvars%hiddenformentry} |
@@ -7,7 +7,7 b'' | |||
|
7 | 7 | <body> |
|
8 | 8 | <div id="container"> |
|
9 | 9 | <div class="page-header"> |
|
10 | <h1><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / file diff</h1> | |
|
10 | <h1 class="breadcrumb"><a href="/">Mercurial</a> {pathdef%breadcrumb} / file diff</h1> | |
|
11 | 11 | |
|
12 | 12 | <form action="{url}log"> |
|
13 | 13 | {sessionvars%hiddenformentry} |
@@ -7,7 +7,7 b'' | |||
|
7 | 7 | <body> |
|
8 | 8 | <div id="container"> |
|
9 | 9 | <div class="page-header"> |
|
10 | <h1><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / file revisions</h1> | |
|
10 | <h1 class="breadcrumb"><a href="/">Mercurial</a> {pathdef%breadcrumb} / file revisions</h1> | |
|
11 | 11 | |
|
12 | 12 | <form action="{url}log"> |
|
13 | 13 | {sessionvars%hiddenformentry} |
@@ -7,7 +7,7 b'' | |||
|
7 | 7 | <body> |
|
8 | 8 | <div id="container"> |
|
9 | 9 | <div class="page-header"> |
|
10 | <h1><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / file revision</h1> | |
|
10 | <h1 class="breadcrumb"><a href="/">Mercurial</a> {pathdef%breadcrumb} / file revision</h1> | |
|
11 | 11 | |
|
12 | 12 | <form action="{url}log"> |
|
13 | 13 | {sessionvars%hiddenformentry} |
@@ -8,7 +8,7 b'' | |||
|
8 | 8 | <body> |
|
9 | 9 | <div id="container"> |
|
10 | 10 | <div class="page-header"> |
|
11 | <h1><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / graph</h1> | |
|
11 | <h1 class="breadcrumb"><a href="/">Mercurial</a> {pathdef%breadcrumb} / graph</h1> | |
|
12 | 12 | |
|
13 | 13 | <form action="{url}log"> |
|
14 | 14 | {sessionvars%hiddenformentry} |
@@ -7,7 +7,7 b'' | |||
|
7 | 7 | <body> |
|
8 | 8 | <div id="container"> |
|
9 | 9 | <div class="page-header"> |
|
10 | <h1><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / help</h1> | |
|
10 | <h1 class="breadcrumb"><a href="/">Mercurial</a> {pathdef%breadcrumb} / help</h1> | |
|
11 | 11 | |
|
12 | 12 | <form action="{url}log"> |
|
13 | 13 | {sessionvars%hiddenformentry} |
@@ -7,7 +7,7 b'' | |||
|
7 | 7 | <body> |
|
8 | 8 | <div id="container"> |
|
9 | 9 | <div class="page-header"> |
|
10 | <h1><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / help</h1> | |
|
10 | <h1 class="breadcrumb"><a href="/">Mercurial</a> {pathdef%breadcrumb} / help</h1> | |
|
11 | 11 | |
|
12 | 12 | <form action="{url}log"> |
|
13 | 13 | {sessionvars%hiddenformentry} |
@@ -5,7 +5,7 b'' | |||
|
5 | 5 | <body> |
|
6 | 6 | <div id="container"> |
|
7 | 7 | <div class="page-header"> |
|
8 | <h1>Mercurial Repositories</h1> | |
|
8 | <h1 class="breadcrumb"><a href="/">Mercurial</a> {pathdef%breadcrumb}</h1> | |
|
9 | 9 | <ul class="page-nav"> |
|
10 | 10 | </ul> |
|
11 | 11 | </div> |
@@ -7,7 +7,7 b'' | |||
|
7 | 7 | <body> |
|
8 | 8 | <div id="container"> |
|
9 | 9 | <div class="page-header"> |
|
10 | <h1><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / files</h1> | |
|
10 | <h1 class="breadcrumb"><a href="/">Mercurial</a> {pathdef%breadcrumb} / files</h1> | |
|
11 | 11 | |
|
12 | 12 | <form action="{url}log"> |
|
13 | 13 | {sessionvars%hiddenformentry} |
@@ -247,10 +247,11 b" indexentry = '" | |||
|
247 | 247 | <td class="age">{lastchange|rfc822date}</td> |
|
248 | 248 | <td class="indexlinks">{archives%indexarchiveentry}</td> |
|
249 | 249 | <td> |
|
250 | <div class="rss_logo"> | |
|
251 | <a href="{url}rss-log">RSS</a> | |
|
252 | <a href="{url}atom-log">Atom</a> | |
|
253 | </div> | |
|
250 | {if(isdirectory, '', | |
|
251 | '<div class="rss_logo"> | |
|
252 | <a href="{url}rss-log">RSS</a> <a href="{url}atom-log">Atom</a> | |
|
253 | </div>' | |
|
254 | )} | |
|
254 | 255 | </td> |
|
255 | 256 | </tr>\n' |
|
256 | 257 | indexarchiveentry = '<a href="{url}archive/{node|short}{extension}">{type|escape}</a> ' |
@@ -258,3 +259,4 b' index = index.tmpl' | |||
|
258 | 259 | urlparameter = '{separator}{name}={value|urlescape}' |
|
259 | 260 | hiddenformentry = '<input type="hidden" name="{name}" value="{value|escape}" />' |
|
260 | 261 | graph = graph.tmpl |
|
262 | breadcrumb = '> <a href="{url}">{name}</a> ' |
@@ -7,7 +7,7 b'' | |||
|
7 | 7 | <body> |
|
8 | 8 | <div id="container"> |
|
9 | 9 | <div class="page-header"> |
|
10 | <h1><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / not found: {repo|escape}</h1> | |
|
10 | <h1 class="breadcrumb"><a href="/">Mercurial</a> {pathdef%breadcrumb} / not found: {repo|escape}</h1> | |
|
11 | 11 | |
|
12 | 12 | <form action="{url}log"> |
|
13 | 13 | {sessionvars%hiddenformentry} |
@@ -7,7 +7,7 b'' | |||
|
7 | 7 | <body> |
|
8 | 8 | <div id="container"> |
|
9 | 9 | <div class="page-header"> |
|
10 | <h1><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / search</h1> | |
|
10 | <h1 class="breadcrumb"><a href="/">Mercurial</a> {pathdef%breadcrumb} / search</h1> | |
|
11 | 11 | |
|
12 | 12 | <form action="{url}log"> |
|
13 | 13 | {sessionvars%hiddenformentry} |
@@ -7,7 +7,7 b'' | |||
|
7 | 7 | <body> |
|
8 | 8 | <div id="container"> |
|
9 | 9 | <div class="page-header"> |
|
10 | <h1><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / shortlog</h1> | |
|
10 | <h1 class="breadcrumb"><a href="/">Mercurial</a> {pathdef%breadcrumb} / shortlog</h1> | |
|
11 | 11 | |
|
12 | 12 | <form action="{url}log"> |
|
13 | 13 | {sessionvars%hiddenformentry} |
@@ -7,7 +7,7 b'' | |||
|
7 | 7 | <body> |
|
8 | 8 | <div id="container"> |
|
9 | 9 | <div class="page-header"> |
|
10 | <h1><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / summary</h1> | |
|
10 | <h1 class="breadcrumb"><a href="/">Mercurial</a> {pathdef%breadcrumb} / summary</h1> | |
|
11 | 11 | |
|
12 | 12 | <form action="{url}log"> |
|
13 | 13 | {sessionvars%hiddenformentry} |
@@ -7,7 +7,7 b'' | |||
|
7 | 7 | <body> |
|
8 | 8 | <div id="container"> |
|
9 | 9 | <div class="page-header"> |
|
10 | <h1><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / tags</h1> | |
|
10 | <h1 class="breadcrumb"><a href="/">Mercurial</a> {pathdef%breadcrumb} / tags</h1> | |
|
11 | 11 | |
|
12 | 12 | <form action="{url}log"> |
|
13 | 13 | {sessionvars%hiddenformentry} |
@@ -23,10 +23,16 b'' | |||
|
23 | 23 | <ul> |
|
24 | 24 | <li><a href="{url}help{sessionvars%urlparameter}">help</a></li> |
|
25 | 25 | </ul> |
|
26 | <p> | |
|
27 | <div class="atom-logo"> | |
|
28 | <a href="{url}atom-bookmarks" title="subscribe to atom feed"> | |
|
29 | <img class="atom-logo" src="{staticurl}feed-icon-14x14.png" alt="atom feed"> | |
|
30 | </a> | |
|
31 | </div> | |
|
26 | 32 | </div> |
|
27 | 33 | |
|
28 | 34 | <div class="main"> |
|
29 | <h2><a href="{url}{sessionvars%urlparameter}">{repo|escape}</a></h2> | |
|
35 | <h2 class="breadcrumb"><a href="/">Mercurial</a> {pathdef%breadcrumb}</h2> | |
|
30 | 36 | <h3>bookmarks</h3> |
|
31 | 37 | |
|
32 | 38 | <form class="search" action="{url}log"> |
@@ -1,9 +1,9 b'' | |||
|
1 | 1 | {header} |
|
2 | 2 | <title>{repo|escape}: branches</title> |
|
3 | 3 | <link rel="alternate" type="application/atom+xml" |
|
4 | href="{url}atom-log" title="Atom feed for {repo|escape}: branches" /> | |
|
4 | href="{url}atom-branches" title="Atom feed for {repo|escape}: branches" /> | |
|
5 | 5 | <link rel="alternate" type="application/rss+xml" |
|
6 | href="{url}rss-log" title="RSS feed for {repo|escape}: branches" /> | |
|
6 | href="{url}rss-branches" title="RSS feed for {repo|escape}: branches" /> | |
|
7 | 7 | </head> |
|
8 | 8 | <body> |
|
9 | 9 | |
@@ -23,10 +23,16 b'' | |||
|
23 | 23 | <ul> |
|
24 | 24 | <li><a href="{url}help{sessionvars%urlparameter}">help</a></li> |
|
25 | 25 | </ul> |
|
26 | <p> | |
|
27 | <div class="atom-logo"> | |
|
28 | <a href="{url}atom-branches" title="subscribe to atom feed"> | |
|
29 | <img class="atom-logo" src="{staticurl}feed-icon-14x14.png" alt="atom feed"> | |
|
30 | </a> | |
|
31 | </div> | |
|
26 | 32 | </div> |
|
27 | 33 | |
|
28 | 34 | <div class="main"> |
|
29 | <h2><a href="{url}{sessionvars%urlparameter}">{repo|escape}</a></h2> | |
|
35 | <h2 class="breadcrumb"><a href="/">Mercurial</a> {pathdef%breadcrumb}</h2> | |
|
30 | 36 | <h3>branches</h3> |
|
31 | 37 | |
|
32 | 38 | <form class="search" action="{url}log"> |
@@ -30,7 +30,7 b'' | |||
|
30 | 30 | |
|
31 | 31 | <div class="main"> |
|
32 | 32 | |
|
33 | <h2><a href="{url}{sessionvars%urlparameter}">{repo|escape}</a></h2> | |
|
33 | <h2 class="breadcrumb"><a href="/">Mercurial</a> {pathdef%breadcrumb}</h2> | |
|
34 | 34 | <h3>changeset {rev}:{node|short} {changesetbranch%changelogbranchname} {changesettag} {changesetbookmark}</h3> |
|
35 | 35 | |
|
36 | 36 | <form class="search" action="{url}log"> |
@@ -74,6 +74,14 b' files, or words in the commit message</d' | |||
|
74 | 74 | </div> |
|
75 | 75 | </td> |
|
76 | 76 | </tr> |
|
77 | <tr> | |
|
78 | <th class="author">change baseline</th> | |
|
79 | <td class="author">{parent%changesetbaseline}</td> | |
|
80 | </tr> | |
|
81 | <tr> | |
|
82 | <th class="author">current baseline</th> | |
|
83 | <td class="author"><a href="{url}rev/{currentbaseline|short}{sessionvars%urlparameter}">{currentbaseline|short}</a></td> | |
|
84 | </tr> | |
|
77 | 85 | </table> |
|
78 | 86 | |
|
79 | 87 | <div class="overflow"> |
@@ -23,7 +23,7 b'' | |||
|
23 | 23 | |
|
24 | 24 | <div class="main"> |
|
25 | 25 | |
|
26 | <h2><a href="{url}{sessionvars%urlparameter}">{repo|escape}</a></h2> | |
|
26 | <h2 class="breadcrumb"><a href="/">Mercurial</a> {pathdef%breadcrumb}</h2> | |
|
27 | 27 | <h3>error</h3> |
|
28 | 28 | |
|
29 | 29 | <form class="search" action="{url}log"> |
@@ -36,7 +36,7 b'' | |||
|
36 | 36 | </div> |
|
37 | 37 | |
|
38 | 38 | <div class="main"> |
|
39 | <h2><a href="{url}{sessionvars%urlparameter}">{repo|escape}</a></h2> | |
|
39 | <h2 class="breadcrumb"><a href="/">Mercurial</a> {pathdef%breadcrumb}</h2> | |
|
40 | 40 | <h3>annotate {file|escape} @ {rev}:{node|short}</h3> |
|
41 | 41 | |
|
42 | 42 | <form class="search" action="{url}log"> |
@@ -35,7 +35,7 b'' | |||
|
35 | 35 | </div> |
|
36 | 36 | |
|
37 | 37 | <div class="main"> |
|
38 | <h2><a href="{url}{sessionvars%urlparameter}">{repo|escape}</a></h2> | |
|
38 | <h2 class="breadcrumb"><a href="/">Mercurial</a> {pathdef%breadcrumb}</h2> | |
|
39 | 39 | <h3>comparison {file|escape} @ {rev}:{node|short}</h3> |
|
40 | 40 | |
|
41 | 41 | <form class="search" action="{url}log"> |
@@ -35,7 +35,7 b'' | |||
|
35 | 35 | </div> |
|
36 | 36 | |
|
37 | 37 | <div class="main"> |
|
38 | <h2><a href="{url}{sessionvars%urlparameter}">{repo|escape}</a></h2> | |
|
38 | <h2 class="breadcrumb"><a href="/">Mercurial</a> {pathdef%breadcrumb}</h2> | |
|
39 | 39 | <h3>diff {file|escape} @ {rev}:{node|short}</h3> |
|
40 | 40 | |
|
41 | 41 | <form class="search" action="{url}log"> |
@@ -35,10 +35,15 b'' | |||
|
35 | 35 | <ul> |
|
36 | 36 | <li><a href="{url}help{sessionvars%urlparameter}">help</a></li> |
|
37 | 37 | </ul> |
|
38 | <p> | |
|
39 | <div class="atom-logo"> | |
|
40 | <a href="{url}atom-log/{node|short}/{file|urlescape}" title="subscribe to atom feed"> | |
|
41 | <img class="atom-logo" src="{staticurl}feed-icon-14x14.png" alt="atom feed"></a> | |
|
42 | </div> | |
|
38 | 43 | </div> |
|
39 | 44 | |
|
40 | 45 | <div class="main"> |
|
41 | <h2><a href="{url}{sessionvars%urlparameter}">{repo|escape}</a></h2> | |
|
46 | <h2 class="breadcrumb"><a href="/">Mercurial</a> {pathdef%breadcrumb}</h2> | |
|
42 | 47 | <h3>log {file|escape}</h3> |
|
43 | 48 | |
|
44 | 49 | <form class="search" action="{url}log"> |
@@ -34,7 +34,7 b'' | |||
|
34 | 34 | </div> |
|
35 | 35 | |
|
36 | 36 | <div class="main"> |
|
37 | <h2><a href="{url}{sessionvars%urlparameter}">{repo|escape}</a></h2> | |
|
37 | <h2 class="breadcrumb"><a href="/">Mercurial</a> {pathdef%breadcrumb}</h2> | |
|
38 | 38 | <h3>view {file|escape} @ {rev}:{node|short}</h3> |
|
39 | 39 | |
|
40 | 40 | <form class="search" action="{url}log"> |
@@ -28,10 +28,16 b'' | |||
|
28 | 28 | <ul> |
|
29 | 29 | <li><a href="{url}help{sessionvars%urlparameter}">help</a></li> |
|
30 | 30 | </ul> |
|
31 | <p> | |
|
32 | <div class="atom-logo"> | |
|
33 | <a href="{url}atom-log" title="subscribe to atom feed"> | |
|
34 | <img class="atom-logo" src="{staticurl}feed-icon-14x14.png" alt="atom feed"> | |
|
35 | </a> | |
|
36 | </div> | |
|
31 | 37 | </div> |
|
32 | 38 | |
|
33 | 39 | <div class="main"> |
|
34 | <h2><a href="{url}{sessionvars%urlparameter}">{repo|escape}</a></h2> | |
|
40 | <h2 class="breadcrumb"><a href="/">Mercurial</a> {pathdef%breadcrumb}</h2> | |
|
35 | 41 | <h3>graph</h3> |
|
36 | 42 | |
|
37 | 43 | <form class="search" action="{url}log"> |
@@ -22,7 +22,7 b'' | |||
|
22 | 22 | </div> |
|
23 | 23 | |
|
24 | 24 | <div class="main"> |
|
25 | <h2><a href="{url}{sessionvars%urlparameter}">{repo|escape}</a></h2> | |
|
25 | <h2 class="breadcrumb"><a href="/">Mercurial</a> {pathdef%breadcrumb}</h2> | |
|
26 | 26 | <h3>Help: {topic}</h3> |
|
27 | 27 | |
|
28 | 28 | <form class="search" action="{url}log"> |
@@ -22,7 +22,7 b'' | |||
|
22 | 22 | </div> |
|
23 | 23 | |
|
24 | 24 | <div class="main"> |
|
25 | <h2><a href="{url}{sessionvars%urlparameter}">{repo|escape}</a></h2> | |
|
25 | <h2 class="breadcrumb"><a href="/">Mercurial</a> {pathdef%breadcrumb}</h2> | |
|
26 | 26 | <form class="search" action="{url}log"> |
|
27 | 27 | {sessionvars%hiddenformentry} |
|
28 | 28 | <p><input name="rev" id="search1" type="text" size="30" /></p> |
@@ -9,7 +9,7 b'' | |||
|
9 | 9 | <img src="{staticurl}{logoimg}" width=75 height=90 border=0 alt="mercurial" /></a> |
|
10 | 10 | </div> |
|
11 | 11 | <div class="main"> |
|
12 | <h2>Mercurial Repositories</h2> | |
|
12 | <h2 class="breadcrumb"><a href="/">Mercurial</a> {pathdef%breadcrumb}</h2> | |
|
13 | 13 | |
|
14 | 14 | <table class="bigtable"> |
|
15 | 15 | <tr> |
@@ -18,6 +18,7 b'' | |||
|
18 | 18 | <th><a href="?sort={sort_contact}">Contact</a></th> |
|
19 | 19 | <th><a href="?sort={sort_lastchange}">Last modified</a></th> |
|
20 | 20 | <th> </th> |
|
21 | <th> </th> | |
|
21 | 22 | </tr> |
|
22 | 23 | {entries%indexentry} |
|
23 | 24 | </table> |
@@ -29,7 +29,7 b'' | |||
|
29 | 29 | </div> |
|
30 | 30 | |
|
31 | 31 | <div class="main"> |
|
32 | <h2><a href="{url}{sessionvars%urlparameter}">{repo|escape}</a></h2> | |
|
32 | <h2 class="breadcrumb"><a href="/">Mercurial</a> {pathdef%breadcrumb}</h2> | |
|
33 | 33 | <h3>directory {path|escape} @ {rev}:{node|short} {tags%changelogtag}</h3> |
|
34 | 34 | |
|
35 | 35 | <form class="search" action="{url}log"> |
@@ -101,6 +101,8 b" changelogparent = '" | |||
|
101 | 101 | |
|
102 | 102 | changesetparent = '<a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a> ' |
|
103 | 103 | |
|
104 | changesetbaseline = '<a href="{url}rev/{node|short}:{originalnode|short}{sessionvars%urlparameter}">{node|short}</a> ' | |
|
105 | ||
|
104 | 106 | filerevparent = '<a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{rename%filerename}{node|short}</a> ' |
|
105 | 107 | filerevchild = '<a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{node|short}</a> ' |
|
106 | 108 | |
@@ -211,6 +213,13 b" indexentry = '" | |||
|
211 | 213 | <td>{contact|obfuscate}</td> |
|
212 | 214 | <td class="age">{lastchange|rfc822date}</td> |
|
213 | 215 | <td class="indexlinks">{archives%indexarchiveentry}</td> |
|
216 | <td> | |
|
217 | {if(isdirectory, '', | |
|
218 | '<a href="{url}atom-log" title="subscribe to repository atom feed"> | |
|
219 | <img class="atom-logo" src="{staticurl}feed-icon-14x14.png" alt="subscribe to repository atom feed"> | |
|
220 | </a>' | |
|
221 | )} | |
|
222 | </td> | |
|
214 | 223 | </tr>\n' |
|
215 | 224 | indexarchiveentry = '<a href="{url}archive/{node|short}{extension|urlescape}"> ↓{type|escape}</a>' |
|
216 | 225 | index = index.tmpl |
@@ -222,3 +231,4 b' notfound = notfound.tmpl' | |||
|
222 | 231 | error = error.tmpl |
|
223 | 232 | urlparameter = '{separator}{name}={value|urlescape}' |
|
224 | 233 | hiddenformentry = '<input type="hidden" name="{name}" value="{value|escape}" />' |
|
234 | breadcrumb = '> <a href="{url}">{name}</a> ' |
@@ -20,7 +20,7 b'' | |||
|
20 | 20 | </div> |
|
21 | 21 | |
|
22 | 22 | <div class="main"> |
|
23 | <h2><a href="{url}{sessionvars%urlparameter}">{repo|escape}</a></h2> | |
|
23 | <h2 class="breadcrumb"><a href="/">Mercurial</a> {pathdef%breadcrumb}</h2> | |
|
24 | 24 | <h3>searching for '{query|escape}'</h3> |
|
25 | 25 | |
|
26 | 26 | <form class="search" action="{url}log"> |
@@ -30,10 +30,16 b'' | |||
|
30 | 30 | <ul> |
|
31 | 31 | <li><a href="{url}help{sessionvars%urlparameter}">help</a></li> |
|
32 | 32 | </ul> |
|
33 | <p> | |
|
34 | <div class="atom-logo"> | |
|
35 | <a href="{url}atom-log" title="subscribe to atom feed"> | |
|
36 | <img class="atom-logo" src="{staticurl}feed-icon-14x14.png" alt="atom feed"> | |
|
37 | </a> | |
|
38 | </div> | |
|
33 | 39 | </div> |
|
34 | 40 | |
|
35 | 41 | <div class="main"> |
|
36 | <h2><a href="{url}{sessionvars%urlparameter}">{repo|escape}</a></h2> | |
|
42 | <h2 class="breadcrumb"><a href="/">Mercurial</a> {pathdef%breadcrumb}</h2> | |
|
37 | 43 | <h3>log</h3> |
|
38 | 44 | |
|
39 | 45 | <form class="search" action="{url}log"> |
@@ -23,10 +23,15 b'' | |||
|
23 | 23 | <ul> |
|
24 | 24 | <li><a href="{url}help{sessionvars%urlparameter}">help</a></li> |
|
25 | 25 | </ul> |
|
26 | <p> | |
|
27 | <div class="atom-logo"> | |
|
28 | <a href="{url}atom-tags" title="subscribe to atom feed"> | |
|
29 | <img class="atom-logo" src="{staticurl}feed-icon-14x14.png" alt="atom feed"></a> | |
|
30 | </div> | |
|
26 | 31 | </div> |
|
27 | 32 | |
|
28 | 33 | <div class="main"> |
|
29 | <h2><a href="{url}{sessionvars%urlparameter}">{repo|escape}</a></h2> | |
|
34 | <h2 class="breadcrumb"><a href="/">Mercurial</a> {pathdef%breadcrumb}</h2> | |
|
30 | 35 | <h3>tags</h3> |
|
31 | 36 | |
|
32 | 37 | <form class="search" action="{url}log"> |
@@ -9,4 +9,6 b' tags = tags.tmpl' | |||
|
9 | 9 | tagentry = tagentry.tmpl |
|
10 | 10 | bookmarks = bookmarks.tmpl |
|
11 | 11 | bookmarkentry = bookmarkentry.tmpl |
|
12 | branches = branches.tmpl | |
|
13 | branchentry = branchentry.tmpl | |
|
12 | 14 | error = error.tmpl |
@@ -18,7 +18,7 b'' | |||
|
18 | 18 | <a type="application/atom+xml" href="{url}atom-branches">atom</a> |
|
19 | 19 | </div> |
|
20 | 20 | |
|
21 | <h2>branches:</h2> | |
|
21 | <h2><a href="/">Mercurial</a> {pathdef%breadcrumb} / branches</h2> | |
|
22 | 22 | |
|
23 | 23 | <ul id="tagEntries"> |
|
24 | 24 | {entries%branchentry} |
@@ -19,7 +19,7 b'' | |||
|
19 | 19 | <a type="application/atom+xml" href="{url}atom-log" title="Atom feed for {repo|escape}">atom</a> |
|
20 | 20 | </div> |
|
21 | 21 | |
|
22 | <h2>changelog for {repo|escape}</h2> | |
|
22 | <h2><a href="/">Mercurial</a> {pathdef%breadcrumb} / changelog</h2> | |
|
23 | 23 | |
|
24 | 24 | <form action="{url}log"> |
|
25 | 25 | {sessionvars%hiddenformentry} |
@@ -15,7 +15,7 b'' | |||
|
15 | 15 | <a href="{url}help{sessionvars%urlparameter}">help</a> |
|
16 | 16 | </div> |
|
17 | 17 | |
|
18 | <h2>changeset: {desc|strip|escape|firstline|nonempty}</h2> | |
|
18 | <h2><a href="/">Mercurial</a> {pathdef%breadcrumb} / changeset: {desc|strip|escape|firstline|nonempty}</h2> | |
|
19 | 19 | |
|
20 | 20 | <table id="changesetEntry"> |
|
21 | 21 | <tr> |
@@ -17,7 +17,7 b'' | |||
|
17 | 17 | <a href="{url}help{sessionvars%urlparameter}">help</a> |
|
18 | 18 | </div> |
|
19 | 19 | |
|
20 | <h2>Annotate {file|escape}</h2> | |
|
20 | <h2><a href="/">Mercurial</a> {pathdef%breadcrumb} / annotate {file|escape}</h2> | |
|
21 | 21 | |
|
22 | 22 | <table> |
|
23 | 23 | <tr> |
@@ -17,7 +17,7 b'' | |||
|
17 | 17 | <a href="{url}help{sessionvars%urlparameter}">help</a> |
|
18 | 18 | </div> |
|
19 | 19 | |
|
20 | <h2>{file|escape}</h2> | |
|
20 | <h2><a href="/">Mercurial</a> {pathdef%breadcrumb} / {file|escape}</h2> | |
|
21 | 21 | |
|
22 | 22 | <table id="filediffEntry"> |
|
23 | 23 | <tr> |
@@ -20,7 +20,7 b'' | |||
|
20 | 20 | <a type="application/atom+xml" href="{url}atom-log/tip/{file|urlescape}" title="Atom feed for {repo|escape}:{file}">atom</a> |
|
21 | 21 | </div> |
|
22 | 22 | |
|
23 | <h2>{file|escape} revision history</h2> | |
|
23 | <h2><a href="/">Mercurial</a> {pathdef%breadcrumb} / {file|escape} revision history</h2> | |
|
24 | 24 | |
|
25 | 25 | <p>navigate: <small class="navigate">{nav%filenav}</small></p> |
|
26 | 26 |
@@ -17,7 +17,7 b'' | |||
|
17 | 17 | <a href="{url}help{sessionvars%urlparameter}">help</a> |
|
18 | 18 | </div> |
|
19 | 19 | |
|
20 | <h2>{file|escape}</h2> | |
|
20 | <h2><a href="/">Mercurial</a> {pathdef%breadcrumb} / {file|escape}</h2> | |
|
21 | 21 | |
|
22 | 22 | <table> |
|
23 | 23 | <tr> |
@@ -17,7 +17,7 b'' | |||
|
17 | 17 | <a href="{url}help{sessionvars%urlparameter}">help</a> |
|
18 | 18 | </div> |
|
19 | 19 | |
|
20 | <h2>graph</h2> | |
|
20 | <h2><a href="/">Mercurial</a> {pathdef%breadcrumb} / graph</h2> | |
|
21 | 21 | |
|
22 | 22 | <form action="{url}log"> |
|
23 | 23 | {sessionvars%hiddenformentry} |
@@ -3,7 +3,7 b'' | |||
|
3 | 3 | </head> |
|
4 | 4 | <body> |
|
5 | 5 | |
|
6 | <h2>Mercurial Repositories</h2> | |
|
6 | <h2><a href="/">Mercurial</a> {pathdef%breadcrumb}</h2> | |
|
7 | 7 | |
|
8 | 8 | <table> |
|
9 | 9 | <tr> |
@@ -14,7 +14,7 b'' | |||
|
14 | 14 | <a href="{url}help{sessionvars%urlparameter}">help</a> |
|
15 | 15 | </div> |
|
16 | 16 | |
|
17 | <h2>files for changeset {node|short}: {path|escape}</h2> | |
|
17 | <h2><a href="/">Mercurial</a> {pathdef%breadcrumb} / files for changeset <a href="{url}rev/{node|short}">{node|short}</a>: {path|escape}</h2> | |
|
18 | 18 | |
|
19 | 19 | <table cellpadding="0" cellspacing="0"> |
|
20 | 20 | <tr class="parity{upparity}"> |
@@ -181,3 +181,4 b' notfound = notfound.tmpl' | |||
|
181 | 181 | error = error.tmpl |
|
182 | 182 | urlparameter = '{separator}{name}={value|urlescape}' |
|
183 | 183 | hiddenformentry = '<input type="hidden" name="{name}" value="{value|escape}" />' |
|
184 | breadcrumb = '> <a href="{url}">{name}</a> ' |
@@ -19,7 +19,7 b'' | |||
|
19 | 19 | <a type="application/rss+xml" href="{url}atom-log" title="Atom feed for {repo|escape}">atom</a> |
|
20 | 20 | </div> |
|
21 | 21 | |
|
22 | <h2>shortlog for {repo|escape}</h2> | |
|
22 | <h2><a href="/">Mercurial</a> {pathdef%breadcrumb} / shortlog</h2> | |
|
23 | 23 | |
|
24 | 24 | <form action="{url}log"> |
|
25 | 25 | {sessionvars%hiddenformentry} |
@@ -18,7 +18,7 b'' | |||
|
18 | 18 | <a type="application/atom+xml" href="{url}atom-tags">atom</a> |
|
19 | 19 | </div> |
|
20 | 20 | |
|
21 | <h2>tags:</h2> | |
|
21 | <h2><a href="/">Mercurial</a> {pathdef%breadcrumb} / tags</h2> | |
|
22 | 22 | |
|
23 | 23 | <ul id="tagEntries"> |
|
24 | 24 | {entries%tagentry} |
@@ -323,3 +323,11 b' ul#graphnodes li .info {' | |||
|
323 | 323 | .block { |
|
324 | 324 | border-top: 1px solid #999; |
|
325 | 325 | } |
|
326 | ||
|
327 | .breadcrumb { | |
|
328 | color: gray; | |
|
329 | } | |
|
330 | ||
|
331 | .breadcrumb a { | |
|
332 | color: blue; | |
|
333 | } |
@@ -524,3 +524,7 b' ul#graphnodes li .info {' | |||
|
524 | 524 | border-top: 1px solid #999; |
|
525 | 525 | } |
|
526 | 526 | /** end of comparison **/ |
|
527 | ||
|
528 | .breadcrumb a:hover { | |
|
529 | text-decoration:underline; | |
|
530 | } |
@@ -60,6 +60,12 b' body {' | |||
|
60 | 60 | border: 0; |
|
61 | 61 | } |
|
62 | 62 | |
|
63 | .atom-logo img{ | |
|
64 | width: 14px; | |
|
65 | height: 14px; | |
|
66 | border: 0; | |
|
67 | } | |
|
68 | ||
|
63 | 69 | .menu a { color: black; display: block; } |
|
64 | 70 | |
|
65 | 71 | .search { |
@@ -312,3 +318,11 b' ul#graphnodes li .info {' | |||
|
312 | 318 | .block { |
|
313 | 319 | border-top: 1px solid #999; |
|
314 | 320 | } |
|
321 | ||
|
322 | .breadcrumb { | |
|
323 | color: gray; | |
|
324 | } | |
|
325 | ||
|
326 | .breadcrumb a { | |
|
327 | color: blue; | |
|
328 | } |
@@ -613,7 +613,7 b' class ui(object):' | |||
|
613 | 613 | ('&None', 'E&xec', 'Sym&link') Responses are case insensitive. |
|
614 | 614 | If ui is not interactive, the default is returned. |
|
615 | 615 | """ |
|
616 | resps = [s[s.index('&')+1].lower() for s in choices] | |
|
616 | resps = [s[s.index('&') + 1].lower() for s in choices] | |
|
617 | 617 | while True: |
|
618 | 618 | r = self.prompt(msg, resps[default]) |
|
619 | 619 | if r.lower() in resps: |
@@ -164,7 +164,7 b' if has_https:' | |||
|
164 | 164 | if sock is not None: |
|
165 | 165 | sock.close() |
|
166 | 166 | |
|
167 | raise socket.error, msg | |

167 | raise socket.error(msg) | |
|
168 | 168 | |
|
169 | 169 | class httpconnection(keepalive.HTTPConnection): |
|
170 | 170 | # must be able to send big bundle as stream. |
@@ -64,7 +64,7 b' shellquote = platform.shellquote' | |||
|
64 | 64 | spawndetached = platform.spawndetached |
|
65 | 65 | split = platform.split |
|
66 | 66 | sshargs = platform.sshargs |
|
67 | statfiles = platform.statfiles | |
|
67 | statfiles = getattr(osutil, 'statfiles', platform.statfiles) | |
|
68 | 68 | termwidth = platform.termwidth |
|
69 | 69 | testpid = platform.testpid |
|
70 | 70 | umask = platform.umask |
@@ -244,9 +244,12 b' class propertycache(object):' | |||
|
244 | 244 | self.name = func.__name__ |
|
245 | 245 | def __get__(self, obj, type=None): |
|
246 | 246 | result = self.func(obj) |
|
247 | setattr(obj, self.name, result) | |

247 | self.cachevalue(obj, result) | |
|
248 | 248 | return result |
|
249 | 249 | |
|
250 | def cachevalue(self, obj, value): | |
|
251 | setattr(obj, self.name, value) | |
|
252 | ||
|
250 | 253 | def pipefilter(s, cmd): |
|
251 | 254 | '''filter string S through command CMD, returning its output''' |
|
252 | 255 | p = subprocess.Popen(cmd, shell=True, close_fds=closefds, |
@@ -479,11 +482,9 b' def checksignature(func):' | |||
|
479 | 482 | |
|
480 | 483 | def copyfile(src, dest): |
|
481 | 484 | "copy a file, preserving mode and atime/mtime" |
|
485 | if os.path.lexists(dest): | |
|
486 | unlink(dest) | |
|
482 | 487 | if os.path.islink(src): |
|
483 | try: | |
|
484 | os.unlink(dest) | |
|
485 | except OSError: | |
|
486 | pass | |
|
487 | 488 | os.symlink(os.readlink(src), dest) |
|
488 | 489 | else: |
|
489 | 490 | try: |
@@ -25,6 +25,7 b' def _normpath(f):' | |||
|
25 | 25 | return f |
|
26 | 26 | |
|
27 | 27 | def _verify(repo): |
|
28 | repo = repo.unfiltered() | |
|
28 | 29 | mflinkrevs = {} |
|
29 | 30 | filelinkrevs = {} |
|
30 | 31 | filenodes = {} |
@@ -370,7 +370,7 b' def unlink(f):' | |||
|
370 | 370 | if e.errno != errno.EEXIST: |
|
371 | 371 | raise |
|
372 | 372 | else: |
|
373 | raise IOError, (errno.EEXIST, "No usable temporary filename found") | |

373 | raise IOError(errno.EEXIST, "No usable temporary filename found") | |
|
374 | 374 | |
|
375 | 375 | try: |
|
376 | 376 | os.unlink(temp) |
@@ -7,7 +7,7 b'' | |||
|
7 | 7 | |
|
8 | 8 | from i18n import _ |
|
9 | 9 | import osutil, encoding |
|
10 | import errno, msvcrt, os, re, sys, _winreg | |
|
10 | import errno, msvcrt, os, re, stat, sys, _winreg | |
|
11 | 11 | |
|
12 | 12 | import win32 |
|
13 | 13 | executablepath = win32.executablepath |
@@ -213,10 +213,15 b' def findexe(command):' | |||
|
213 | 213 | return executable |
|
214 | 214 | return findexisting(os.path.expanduser(os.path.expandvars(command))) |
|
215 | 215 | |
|
216 | _wantedkinds = set([stat.S_IFREG, stat.S_IFLNK]) | |
|
217 | ||
|
216 | 218 | def statfiles(files): |
|
217 | '''Stat each file in files

219 | '''Stat each file in files. Yield each stat, or None if a file | |
|
220 | does not exist or has a type we don't care about. | |
|
221 | ||
|
218 | 222 | Cluster and cache stat per directory to minimize number of OS stat calls.''' |
|
219 | 223 | dircache = {} # dirname -> filename -> status | None if file does not exist |
|
224 | getkind = stat.S_IFMT | |
|
220 | 225 | for nf in files: |
|
221 | 226 | nf = normcase(nf) |
|
222 | 227 | dir, base = os.path.split(nf) |
@@ -226,7 +231,8 b' def statfiles(files):' | |||
|
226 | 231 | if cache is None: |
|
227 | 232 | try: |
|
228 | 233 | dmap = dict([(normcase(n), s) |
|
229 | for n, k, s in osutil.listdir(dir, True)]) | |

234 | for n, k, s in osutil.listdir(dir, True) | |
|
235 | if getkind(s.st_mode) in _wantedkinds]) | |
|
230 | 236 | except OSError, err: |
|
231 | 237 | # handle directory not found in Python version prior to 2.5 |
|
232 | 238 | # Python <= 2.4 returns native Windows code 3 in errno |
@@ -269,9 +275,13 b' def _removedirs(name):' | |||
|
269 | 275 | break |
|
270 | 276 | head, tail = os.path.split(head) |
|
271 | 277 | |
|
272 | def unlinkpath(f): | |
|
278 | def unlinkpath(f, ignoremissing=False): | |
|
273 | 279 | """unlink and remove the directory if it is empty""" |
|
274 | unlink(f) | |
|
280 | try: | |
|
281 | unlink(f) | |
|
282 | except OSError, e: | |
|
283 | if not (ignoremissing and e.errno == errno.ENOENT): | |
|
284 | raise | |
|
275 | 285 | # try removing directories that might now be empty |
|
276 | 286 | try: |
|
277 | 287 | _removedirs(os.path.dirname(f)) |
@@ -10,7 +10,6 b' from i18n import _' | |||
|
10 | 10 | from node import bin, hex |
|
11 | 11 | import changegroup as changegroupmod |
|
12 | 12 | import peer, error, encoding, util, store |
|
13 | import discovery, phases | |
|
14 | 13 | |
|
15 | 14 | # abstract batching support |
|
16 | 15 | |
@@ -346,6 +345,7 b' class ooberror(object):' | |||
|
346 | 345 | self.message = message |
|
347 | 346 | |
|
348 | 347 | def dispatch(repo, proto, command): |
|
348 | repo = repo.filtered("served") | |
|
349 | 349 | func, spec = commands[command] |
|
350 | 350 | args = proto.getargs(spec) |
|
351 | 351 | return func(repo, proto, *args) |
@@ -362,6 +362,7 b' def options(cmd, keys, others):' | |||
|
362 | 362 | return opts |
|
363 | 363 | |
|
364 | 364 | def batch(repo, proto, cmds, others): |
|
365 | repo = repo.filtered("served") | |
|
365 | 366 | res = [] |
|
366 | 367 | for pair in cmds.split(';'): |
|
367 | 368 | op, args = pair.split(' ', 1) |
@@ -399,7 +400,7 b' def between(repo, proto, pairs):' | |||
|
399 | 400 | return "".join(r) |
|
400 | 401 | |
|
401 | 402 | def branchmap(repo, proto): |
|
402 | branchmap = discovery.visiblebranchmap(repo) | |

403 | branchmap = repo.branchmap() | |
|
403 | 404 | heads = [] |
|
404 | 405 | for branch, nodes in branchmap.iteritems(): |
|
405 | 406 | branchname = urllib.quote(encoding.fromlocal(branch)) |
@@ -455,7 +456,7 b' def getbundle(repo, proto, others):' | |||
|
455 | 456 | return streamres(proto.groupchunks(cg)) |
|
456 | 457 | |
|
457 | 458 | def heads(repo, proto): |
|
458 | h = discovery.visibleheads(repo) | |

459 | h = repo.heads() | |
|
459 | 460 | return encodelist(h) + "\n" |
|
460 | 461 | |
|
461 | 462 | def hello(repo, proto): |
@@ -478,8 +479,6 b' def lookup(repo, proto, key):' | |||
|
478 | 479 | try: |
|
479 | 480 | k = encoding.tolocal(key) |
|
480 | 481 | c = repo[k] |
|
481 | if c.phase() == phases.secret: | |
|
482 | raise error.RepoLookupError(_("unknown revision '%s'") % k) | |
|
483 | 482 | r = c.hex() |
|
484 | 483 | success = 1 |
|
485 | 484 | except Exception, inst: |
@@ -546,8 +545,9 b' def stream(repo, proto):' | |||
|
546 | 545 | try: |
|
547 | 546 | repo.ui.debug('scanning\n') |
|
548 | 547 | for name, ename, size in repo.store.walk(): |
|
549 | entries.append((name, size)) | |

550 | total_bytes += size | |
|
548 | if size: | |
|
549 | entries.append((name, size)) | |
|
550 | total_bytes += size | |
|
551 | 551 | finally: |
|
552 | 552 | lock.release() |
|
553 | 553 | except error.LockError: |
@@ -593,7 +593,7 b' def unbundle(repo, proto, heads):' | |||
|
593 | 593 | their_heads = decodelist(heads) |
|
594 | 594 | |
|
595 | 595 | def check_heads(): |
|
596 | heads = discovery.visibleheads(repo) | |

596 | heads = repo.heads() | |
|
597 | 597 | heads_hash = util.sha1(''.join(sorted(heads))).digest() |
|
598 | 598 | return (their_heads == ['force'] or their_heads == heads or |
|
599 | 599 | their_heads == ['hashed', heads_hash]) |
@@ -151,6 +151,8 b' def runhg(cmd, env):' | |||
|
151 | 151 | if not e.startswith(b('Not trusting file')) \ |
|
152 | 152 | and not e.startswith(b('warning: Not importing'))] |
|
153 | 153 | if err: |
|
154 | print >> sys.stderr, "stderr from '%s':" % (' '.join(cmd)) | |
|
155 | print >> sys.stderr, '\n'.join([' ' + e for e in err]) | |
|
154 | 156 | return '' |
|
155 | 157 | return out |
|
156 | 158 |
@@ -35,7 +35,7 b' def autodiff(ui, repo, *pats, **opts):' | |||
|
35 | 35 | for chunk in it: |
|
36 | 36 | ui.write(chunk) |
|
37 | 37 | for fn in sorted(brokenfiles): |
|
38 | ui.write('data lost for: %s\n' % fn) | |
|
38 | ui.write(('data lost for: %s\n' % fn)) | |
|
39 | 39 | |
|
40 | 40 | cmdtable = { |
|
41 | 41 | "autodiff": |
@@ -16,6 +16,10 b' twice = False' | |||
|
16 | 16 | if '--twice' in sys.argv: |
|
17 | 17 | sys.argv.remove('--twice') |
|
18 | 18 | twice = True |
|
19 | headeronly = False | |
|
20 | if '--headeronly' in sys.argv: | |
|
21 | sys.argv.remove('--headeronly') | |
|
22 | headeronly = True | |
|
19 | 23 | |
|
20 | 24 | reasons = {'Not modified': 'Not Modified'} # python 2.4 |
|
21 | 25 | |
@@ -31,16 +35,19 b' def request(host, path, show):' | |||
|
31 | 35 | conn.request("GET", '/' + path, None, headers) |
|
32 | 36 | response = conn.getresponse() |
|
33 | 37 | print response.status, reasons.get(response.reason, response.reason) |
|
38 | if show[:1] == ['-']: | |
|
39 | show = sorted(h for h, v in response.getheaders() | |
|
40 | if h.lower() not in show) | |
|
34 | 41 | for h in [h.lower() for h in show]: |
|
35 | 42 | if response.getheader(h, None) is not None: |
|
36 | 43 | print "%s: %s" % (h, response.getheader(h)) |
|
44 | if not headeronly: | |
|
45 | ||
|
46 | data = response.read() | |
|
47 | sys.stdout.write(data) | |
|
37 | 48 | |
|
38 | ||
|
39 | data = response.read() | |

40 | sys.stdout.write(data) | |
|
41 | ||
|
42 | if twice and response.getheader('ETag', None): | |
|
43 | tag = response.getheader('ETag') | |
|
49 | if twice and response.getheader('ETag', None): | |
|
50 | tag = response.getheader('ETag') | |
|
44 | 51 | |
|
45 | 52 | return response.status |
|
46 | 53 |
@@ -59,7 +59,7 b" if __name__ == '__main__':" | |||
|
59 | 59 | |
|
60 | 60 | if feature not in checks: |
|
61 | 61 | error('skipped: unknown feature: ' + feature) |
|
62 | continue | |

62 | sys.exit(2) | |
|
63 | 63 | |
|
64 | 64 | check, desc = checks[feature] |
|
65 | 65 | try: |
@@ -41,6 +41,10 b' def has_cvs():' | |||
|
41 | 41 | re = r'Concurrent Versions System.*?server' |
|
42 | 42 | return matchoutput('cvs --version 2>&1', re) and not has_msys() |
|
43 | 43 | |
|
44 | def has_cvs112(): | |
|
45 | re = r'Concurrent Versions System \(CVS\) 1.12.*?server' | |
|
46 | return matchoutput('cvs --version 2>&1', re) and not has_msys() | |
|
47 | ||
|
44 | 48 | def has_darcs(): |
|
45 | 49 | return matchoutput('darcs --version', r'2\.[2-9]', True) |
|
46 | 50 | |
@@ -278,6 +282,7 b' checks = {' | |||
|
278 | 282 | "bzr114": (has_bzr114, "Canonical's Bazaar client >= 1.14"), |
|
279 | 283 | "cacheable": (has_cacheable_fs, "cacheable filesystem"), |
|
280 | 284 | "cvs": (has_cvs, "cvs client/server"), |
|
285 | "cvs112": (has_cvs112, "cvs client/server >= 1.12"), | |
|
281 | 286 | "darcs": (has_darcs, "darcs client"), |
|
282 | 287 | "docutils": (has_docutils, "Docutils text processing library"), |
|
283 | 288 | "eol-in-paths": (has_eol_in_paths, "end-of-lines in paths"), |
@@ -55,6 +55,8 b' import time' | |||
|
55 | 55 | import re |
|
56 | 56 | import threading |
|
57 | 57 | import killdaemons as killmod |
|
58 | import cPickle as pickle | |
|
59 | import Queue as queue | |
|
58 | 60 | |
|
59 | 61 | processlock = threading.Lock() |
|
60 | 62 | |
@@ -93,7 +95,8 b" IMPL_PATH = 'PYTHONPATH'" | |||
|
93 | 95 | if 'java' in sys.platform: |
|
94 | 96 | IMPL_PATH = 'JYTHONPATH' |
|
95 | 97 | |
|
96 | requiredtools = ["python", "diff", "grep", "unzip", "gunzip", "bunzip2", "sed"] | |

98 | requiredtools = [os.path.basename(sys.executable), "diff", "grep", "unzip", | |
|
99 | "gunzip", "bunzip2", "sed"] | |
|
97 | 100 | |
|
98 | 101 | defaults = { |
|
99 | 102 | 'jobs': ('HGTEST_JOBS', 1), |
@@ -162,6 +165,8 b' def parseargs():' | |||
|
162 | 165 | parser.add_option("-p", "--port", type="int", |
|
163 | 166 | help="port on which servers should listen" |
|
164 | 167 | " (default: $%s or %d)" % defaults['port']) |
|
168 | parser.add_option("--compiler", type="string", | |
|
169 | help="compiler to build with") | |
|
165 | 170 | parser.add_option("--pure", action="store_true", |
|
166 | 171 | help="use pure Python code instead of C extensions") |
|
167 | 172 | parser.add_option("-R", "--restart", action="store_true", |
@@ -175,6 +180,8 b' def parseargs():' | |||
|
175 | 180 | parser.add_option("-t", "--timeout", type="int", |
|
176 | 181 | help="kill errant tests after TIMEOUT seconds" |
|
177 | 182 | " (default: $%s or %d)" % defaults['timeout']) |
|
183 | parser.add_option("--time", action="store_true", | |
|
184 | help="time how long each test takes") | |
|
178 | 185 | parser.add_option("--tmpdir", type="string", |
|
179 | 186 | help="run tests in the given temporary directory" |
|
180 | 187 | " (implies --keep-tmpdir)") |
@@ -263,6 +270,10 b' def parseargs():' | |||
|
263 | 270 | sys.stderr.write( |
|
264 | 271 | 'warning: --timeout option ignored with --debug\n') |
|
265 | 272 | options.timeout = 0 |
|
273 | if options.time: | |
|
274 | sys.stderr.write( | |
|
275 | 'warning: --time option ignored with --debug\n') | |
|
276 | options.time = False | |
|
266 | 277 | if options.py3k_warnings: |
|
267 | 278 | if sys.version_info[:2] < (2, 6) or sys.version_info[:2] >= (3, 0): |
|
268 | 279 | parser.error('--py3k-warnings can only be used on Python 2.6+') |
@@ -317,7 +328,7 b' def checktools():' | |||
|
317 | 328 | # Before we go any further, check for pre-requisite tools |
|
318 | 329 | # stuff from coreutils (cat, rm, etc) are not tested |
|
319 | 330 | for p in requiredtools: |
|
320 | if os.name == 'nt': | |
|
331 | if os.name == 'nt' and not p.endswith('.exe'): | |
|
321 | 332 | p += '.exe' |
|
322 | 333 | found = findprogram(p) |
|
323 | 334 | if found: |
@@ -345,25 +356,42 b' def cleanup(options):' | |||
|
345 | 356 | def usecorrectpython(): |
|
346 | 357 | # some tests run python interpreter. they must use same |
|
347 | 358 | # interpreter we use or bad things will happen. |
|
348 | exedir, exename = os.path.split(sys.executable) | |
|
349 | if exename in ('python', 'python.exe'): | |
|
350 | path = findprogram(exename) | |
|
351 | if os.path.dirname(path) == exedir: | |
|
352 | return | |
|
359 | pyexename = sys.platform == 'win32' and 'python.exe' or 'python' | |
|
360 | if getattr(os, 'symlink', None): | |
|
361 | vlog("# Making python executable in test path a symlink to '%s'" % | |
|
362 | sys.executable) | |
|
363 | mypython = os.path.join(BINDIR, pyexename) | |
|
364 | try: | |
|
365 | if os.readlink(mypython) == sys.executable: | |
|
366 | return | |
|
367 | os.unlink(mypython) | |
|
368 | except OSError, err: | |
|
369 | if err.errno != errno.ENOENT: | |
|
370 | raise | |
|
371 | if findprogram(pyexename) != sys.executable: | |
|
372 | try: | |
|
373 | os.symlink(sys.executable, mypython) | |
|
374 | except OSError, err: | |
|
375 | # child processes may race, which is harmless | |
|
376 | if err.errno != errno.EEXIST: | |
|
377 | raise | |
|
353 | 378 | else: |
|
354 | exename = 'python' | |
|
355 | vlog('# Making python executable in test path use correct Python') | |
|
356 | mypython = os.path.join(BINDIR, exename) | |
|
357 | try: | |
|
358 | os.symlink(sys.executable, mypython) | |
|
359 | except AttributeError: | |
|
360 | # windows fallback | |
|
361 | shutil.copyfile(sys.executable, mypython) | |
|
362 | shutil.copymode(sys.executable, mypython) | |
|
379 | exedir, exename = os.path.split(sys.executable) | |
|
380 | vlog("# Modifying search path to find %s as %s in '%s'" % | |
|
381 | (exename, pyexename, exedir)) | |
|
382 | path = os.environ['PATH'].split(os.pathsep) | |
|
383 | while exedir in path: | |
|
384 | path.remove(exedir) | |
|
385 | os.environ['PATH'] = os.pathsep.join([exedir] + path) | |
|
386 | if not findprogram(pyexename): | |
|
387 | print "WARNING: Cannot find %s in search path" % pyexename | |
|
363 | 388 | |
|
364 | 389 | def installhg(options): |
|
365 | 390 | vlog("# Performing temporary installation of HG") |
|
366 | 391 | installerrs = os.path.join("tests", "install.err") |
|
392 | compiler = '' | |
|
393 | if options.compiler: | |
|
394 | compiler = '--compiler ' + options.compiler | |
|
367 | 395 | pure = options.pure and "--pure" or "" |
|
368 | 396 | |
|
369 | 397 | # Run installer in hg root |
@@ -377,12 +405,14 b' def installhg(options):' | |||
|
377 | 405 | # least on Windows for now, deal with .pydistutils.cfg bugs |
|
378 | 406 | # when they happen. |
|
379 | 407 | nohome = '' |
|
380 | cmd = ('%s setup.py %s clean --all' | |
|
381 | ' build --build-base="%s"' | |
|
382 | ' install --force --prefix="%s" --install-lib="%s"' | |
|
383 | ' --install-scripts="%s" %s >%s 2>&1' | |
|
384 | % (sys.executable, pure, os.path.join(HGTMP, "build"), | |

385 | INST, PYTHONDIR, BINDIR, nohome, installerrs)) | |
|
408 | cmd = ('%(exe)s setup.py %(pure)s clean --all' | |
|
409 | ' build %(compiler)s --build-base="%(base)s"' | |
|
410 | ' install --force --prefix="%(prefix)s" --install-lib="%(libdir)s"' | |
|
411 | ' --install-scripts="%(bindir)s" %(nohome)s >%(logfile)s 2>&1' | |
|
412 | % dict(exe=sys.executable, pure=pure, compiler=compiler, | |
|
413 | base=os.path.join(HGTMP, "build"), | |
|
414 | prefix=INST, libdir=PYTHONDIR, bindir=BINDIR, | |
|
415 | nohome=nohome, logfile=installerrs)) | |
|
386 | 416 | vlog("# Running", cmd) |
|
387 | 417 | if os.system(cmd) == 0: |
|
388 | 418 | if not options.verbose: |
@@ -447,6 +477,14 b' def installhg(options):' | |||
|
447 | 477 | fn = os.path.join(INST, '..', '.coverage') |
|
448 | 478 | os.environ['COVERAGE_FILE'] = fn |
|
449 | 479 | |
|
480 | def outputtimes(options): | |
|
481 | vlog('# Producing time report') | |
|
482 | times.sort(key=lambda t: (t[1], t[0]), reverse=True) | |
|
483 | cols = '%7.3f %s' | |
|
484 | print '\n%-7s %s' % ('Time', 'Test') | |
|
485 | for test, timetaken in times: | |
|
486 | print cols % (timetaken, test) | |
|
487 | ||
|
450 | 488 | def outputcoverage(options): |
|
451 | 489 | |
|
452 | 490 | vlog('# Producing coverage report') |
@@ -566,10 +604,13 b' def tsttest(test, wd, options, replaceme' | |||
|
566 | 604 | tdir = TESTDIR.replace('\\', '/') |
|
567 | 605 | proc = Popen4('%s -c "%s/hghave %s"' % |
|
568 | 606 | (options.shell, tdir, ' '.join(reqs)), wd, 0) |
|
569 | proc.communicate() | |
|
607 | stdout, stderr = proc.communicate() | |
|
570 | 608 | ret = proc.wait() |
|
571 | 609 | if wifexited(ret): |
|
572 | 610 | ret = os.WEXITSTATUS(ret) |
|
611 | if ret == 2: | |
|
612 | print stdout | |
|
613 | sys.exit(1) | |
|
573 | 614 | return ret == 0 |
|
574 | 615 | |
|
575 | 616 | f = open(test) |
@@ -833,6 +874,7 b' def runone(options, test):' | |||
|
833 | 874 | hgrc = open(HGRCPATH, 'w+') |
|
834 | 875 | hgrc.write('[ui]\n') |
|
835 | 876 | hgrc.write('slash = True\n') |
|
877 | hgrc.write('interactive = False\n') | |
|
836 | 878 | hgrc.write('[defaults]\n') |
|
837 | 879 | hgrc.write('backout = -d "0 0"\n') |
|
838 | 880 | hgrc.write('commit = -d "0 0"\n') |
@@ -891,9 +933,16 b' def runone(options, test):' | |||
|
891 | 933 | replacements.append((re.escape(testtmp), '$TESTTMP')) |
|
892 | 934 | |
|
893 | 935 | os.mkdir(testtmp) |
|
936 | if options.time: | |
|
937 | starttime = time.time() | |
|
894 | 938 | ret, out = runner(testpath, testtmp, options, replacements) |
|
939 | if options.time: | |
|
940 | endtime = time.time() | |
|
941 | times.append((test, endtime - starttime)) | |
|
895 | 942 | vlog("# Ret was:", ret) |
|
896 | 943 | |
|
944 | killdaemons() | |
|
945 | ||
|
897 | 946 | mark = '.' |
|
898 | 947 | |
|
899 | 948 | skipped = (ret == SKIPPED_STATUS) |
@@ -964,8 +1013,6 b' def runone(options, test):' | |||
|
964 | 1013 | sys.stdout.flush() |
|
965 | 1014 | iolock.release() |
|
966 | 1015 | |
|
967 | killdaemons() | |
|
968 | ||
|
969 | 1016 | if not options.keep_tmpdir: |
|
970 | 1017 | shutil.rmtree(testtmp, True) |
|
971 | 1018 | if skipped: |
@@ -1003,6 +1050,8 b' def runchildren(options, tests):' | |||
|
1003 | 1050 | if INST: |
|
1004 | 1051 | installhg(options) |
|
1005 | 1052 | _checkhglib("Testing") |
|
1053 | else: | |
|
1054 | usecorrectpython() | |
|
1006 | 1055 | |
|
1007 | 1056 | optcopy = dict(options.__dict__) |
|
1008 | 1057 | optcopy['jobs'] = 1 |
@@ -1045,7 +1094,13 b' def runchildren(options, tests):' | |||
|
1045 | 1094 | blacklisted.append(test) |
|
1046 | 1095 | else: |
|
1047 | 1096 | job.append(test) |
|
1048 | fps = {} | |
|
1097 | ||
|
1098 | waitq = queue.Queue() | |
|
1099 | ||
|
1100 | # windows lacks os.wait, so we must emulate it | |
|
1101 | def waitfor(proc, rfd): | |
|
1102 | fp = os.fdopen(rfd, 'rb') | |
|
1103 | return lambda: waitq.put((proc.pid, proc.wait(), fp)) | |
|
1049 | 1104 | |
|
1050 | 1105 | for j, job in enumerate(jobs): |
|
1051 | 1106 | if not job: |
@@ -1056,29 +1111,32 b' def runchildren(options, tests):' | |||
|
1056 | 1111 | childopts += ['--tmpdir', childtmp] |
|
1057 | 1112 | cmdline = [PYTHON, sys.argv[0]] + opts + childopts + job |
|
1058 | 1113 | vlog(' '.join(cmdline)) |
|
1059 | fps[os.spawnvp(os.P_NOWAIT, cmdline[0], cmdline)] = os.fdopen(rfd, 'r') | |
|
1114 | proc = subprocess.Popen(cmdline, executable=cmdline[0]) | |
|
1115 | threading.Thread(target=waitfor(proc, rfd)).start() | |
|
1060 | 1116 | os.close(wfd) |
|
1061 | 1117 | signal.signal(signal.SIGINT, signal.SIG_IGN) |
|
1062 | 1118 | failures = 0 |
|
1063 | tested, skipped, failed = 0, 0, 0 | |

1119 | passed, skipped, failed = 0, 0, 0 | |
|
1064 | 1120 | skips = [] |
|
1065 | 1121 | fails = [] |
|
1066 | while fps: | |
|
1067 | pid, status = os.wait() | |
|
1068 | fp = fps.pop(pid) | |
|
1069 | l = fp.read().splitlines() | |
|
1122 | for job in jobs: | |
|
1123 | if not job: | |
|
1124 | continue | |
|
1125 | pid, status, fp = waitq.get() | |
|
1070 | 1126 | try: |
|
1071 | test, skip, fail = map(int, l[:3]) | |
|
1072 | except ValueError: | |

1073 | test, skip, fail = 0, 0, 0 | |
|
1074 | split = -fail or len(l) | |
|
1075 | for s in l[3:split]: | |
|
1076 | skips.append(s.split(" ", 1)) | |
|
1077 | for s in l[split:]: | |
|
1078 | fails.append(s.split(" ", 1)) | |
|
1079 | tested += test | |
|
1080 | skipped += skip | |
|
1081 | failed += fail | |
|
1127 | childresults = pickle.load(fp) | |
|
1128 | except (pickle.UnpicklingError, EOFError): | |
|
1129 | sys.exit(255) | |
|
1130 | else: | |
|
1131 | passed += len(childresults['p']) | |
|
1132 | skipped += len(childresults['s']) | |
|
1133 | failed += len(childresults['f']) | |
|
1134 | skips.extend(childresults['s']) | |
|
1135 | fails.extend(childresults['f']) | |
|
1136 | if options.time: | |
|
1137 | childtimes = pickle.load(fp) | |
|
1138 | times.extend(childtimes) | |
|
1139 | ||
|
1082 | 1140 | vlog('pid %d exited, status %d' % (pid, status)) |
|
1083 | 1141 | failures |= status |
|
1084 | 1142 | |
@@ -1093,17 +1151,20 b' def runchildren(options, tests):' | |||
|
1093 | 1151 | |
|
1094 | 1152 | _checkhglib("Tested") |
|
1095 | 1153 | print "# Ran %d tests, %d skipped, %d failed." % ( |
|
1096 | tested, skipped, failed) | |

1154 | passed + failed, skipped, failed) | |
|
1097 | 1155 | |
|
1156 | if options.time: | |
|
1157 | outputtimes(options) | |
|
1098 | 1158 | if options.anycoverage: |
|
1099 | 1159 | outputcoverage(options) |
|
1100 | 1160 | sys.exit(failures != 0) |
|
1101 | 1161 | |
|
1102 | 1162 | results = dict(p=[], f=[], s=[], i=[]) |
|
1103 | 1163 | resultslock = threading.Lock() |
|
1164 | times = [] | |
|
1104 | 1165 | iolock = threading.Lock() |
|
1105 | 1166 | |
|
1106 | def runqueue(options, tests

1167 | def runqueue(options, tests): | |
|
1107 | 1168 | for test in tests: |
|
1108 | 1169 | ret = runone(options, test) |
|
1109 | 1170 | if options.first and ret is not None and not ret: |
@@ -1118,6 +1179,8 b' def runtests(options, tests):' | |||
|
1118 | 1179 | if INST: |
|
1119 | 1180 | installhg(options) |
|
1120 | 1181 | _checkhglib("Testing") |
|
1182 | else: | |
|
1183 | usecorrectpython() | |
|
1121 | 1184 | |
|
1122 | 1185 | if options.restart: |
|
1123 | 1186 | orig = list(tests) |
@@ -1129,7 +1192,7 b' def runtests(options, tests):' | |||
|
1129 | 1192 | print "running all tests" |
|
1130 | 1193 | tests = orig |
|
1131 | 1194 | |
|
1132 | runqueue(options, tests

1195 | runqueue(options, tests) | |
|
1133 | 1196 | |
|
1134 | 1197 | failed = len(results['f']) |
|
1135 | 1198 | tested = len(results['p']) + failed |
@@ -1137,12 +1200,10 b' def runtests(options, tests):' | |||
|
1137 | 1200 | ignored = len(results['i']) |
|
1138 | 1201 | |
|
1139 | 1202 | if options.child: |
|
1140 | fp = os.fdopen(options.child, 'w') | |
|
1141 | fp.write('%d\n%d\n%d\n' % (tested, skipped, failed)) | |
|
1142 | for s in results['s']: | |
|
1143 | fp.write("%s %s\n" % s) | |
|
1144 | for s in results['f']: | |
|
1145 | fp.write("%s %s\n" % s) | |
|
1203 | fp = os.fdopen(options.child, 'wb') | |
|
1204 | pickle.dump(results, fp, pickle.HIGHEST_PROTOCOL) | |
|
1205 | if options.time: | |
|
1206 | pickle.dump(times, fp, pickle.HIGHEST_PROTOCOL) | |
|
1146 | 1207 | fp.close() |
|
1147 | 1208 | else: |
|
1148 | 1209 | |
@@ -1153,12 +1214,15 b' def runtests(options, tests):' | |||
|
1153 | 1214 | _checkhglib("Tested") |
|
1154 | 1215 | print "# Ran %d tests, %d skipped, %d failed." % ( |
|
1155 | 1216 | tested, skipped + ignored, failed) |
|
1217 | if options.time: | |
|
1218 | outputtimes(options) | |
|
1156 | 1219 | |
|
1157 | 1220 | if options.anycoverage: |
|
1158 | 1221 | outputcoverage(options) |
|
1159 | 1222 | except KeyboardInterrupt: |
|
1160 | 1223 | failed = True |
|
1161 | print "\ninterrupted!" | |
|
1224 | if not options.child: | |
|
1225 | print "\ninterrupted!" | |
|
1162 | 1226 | |
|
1163 | 1227 | if failed: |
|
1164 | 1228 | sys.exit(1) |
@@ -1170,9 +1234,9 b' def main():' | |||
|
1170 | 1234 | |
|
1171 | 1235 | checktools() |
|
1172 | 1236 | |
|
1173 | if len(args) == 0: | |
|
1174 | args = os.listdir(".") | |
|
1175 | args.sort() | |
|
1237 | if len(args) == 0: | |
|
1238 | args = os.listdir(".") | |
|
1239 | args.sort() | |
|
1176 | 1240 | |
|
1177 | 1241 | tests = args |
|
1178 | 1242 | |
@@ -1188,6 +1252,7 b' def main():' | |||
|
1188 | 1252 | os.environ['no_proxy'] = '' |
|
1189 | 1253 | os.environ['NO_PROXY'] = '' |
|
1190 | 1254 | os.environ['TERM'] = 'xterm' |
|
1255 | os.environ['PYTHONHASHSEED'] = 'random' | |
|
1191 | 1256 | |
|
1192 | 1257 | # unset env related to hooks |
|
1193 | 1258 | for k in os.environ.keys(): |
@@ -140,7 +140,7 b' Extension disabled for lack of acl.sourc' | |||
|
140 | 140 | query 1; heads |
|
141 | 141 | searching for changes |
|
142 | 142 | all remote heads known locally |
|
143 | invalidating branch cache (tip differs) | |

143 | invalid branchheads cache (served): tip differs | |
|
144 | 144 | listing keys for "bookmarks" |
|
145 | 145 | 3 changesets found |
|
146 | 146 | list of changesets: |
@@ -202,7 +202,7 b' No [acl.allow]/[acl.deny]' | |||
|
202 | 202 | query 1; heads |
|
203 | 203 | searching for changes |
|
204 | 204 | all remote heads known locally |
|
205 | invalidating branch cache (tip differs) | |

205 | invalid branchheads cache (served): tip differs | |
|
206 | 206 | listing keys for "bookmarks" |
|
207 | 207 | 3 changesets found |
|
208 | 208 | list of changesets: |
@@ -274,7 +274,7 b' Empty [acl.allow]' | |||
|
274 | 274 | query 1; heads |
|
275 | 275 | searching for changes |
|
276 | 276 | all remote heads known locally |
|
277 | invalidating branch cache (tip differs) | |

277 | invalid branchheads cache (served): tip differs | |
|
278 | 278 | listing keys for "bookmarks" |
|
279 | 279 | 3 changesets found |
|
280 | 280 | list of changesets: |
@@ -341,6 +341,7 b' fred is allowed inside foo/' | |||
|
341 | 341 | query 1; heads |
|
342 | 342 | searching for changes |
|
343 | 343 | all remote heads known locally |
|
344 | invalid branchheads cache (served): tip differs | |
|
344 | 345 | listing keys for "bookmarks" |
|
345 | 346 | 3 changesets found |
|
346 | 347 | list of changesets: |
@@ -412,6 +413,7 b' Empty [acl.deny]' | |||
|
412 | 413 | query 1; heads |
|
413 | 414 | searching for changes |
|
414 | 415 | all remote heads known locally |
|
416 | invalid branchheads cache (served): tip differs | |
|
415 | 417 | listing keys for "bookmarks" |
|
416 | 418 | 3 changesets found |
|
417 | 419 | list of changesets: |
@@ -480,6 +482,7 b' fred is allowed inside foo/, but not foo' | |||
|
480 | 482 | query 1; heads |
|
481 | 483 | searching for changes |
|
482 | 484 | all remote heads known locally |
|
485 | invalid branchheads cache (served): tip differs | |
|
483 | 486 | listing keys for "bookmarks" |
|
484 | 487 | 3 changesets found |
|
485 | 488 | list of changesets: |
@@ -553,6 +556,7 b' fred is allowed inside foo/, but not foo' | |||
|
553 | 556 | query 1; heads |
|
554 | 557 | searching for changes |
|
555 | 558 | all remote heads known locally |
|
559 | invalid branchheads cache (served): tip differs | |
|
556 | 560 | listing keys for "bookmarks" |
|
557 | 561 | 3 changesets found |
|
558 | 562 | list of changesets: |
@@ -623,6 +627,7 b' fred is allowed inside foo/, but not foo' | |||
|
623 | 627 | query 1; heads |
|
624 | 628 | searching for changes |
|
625 | 629 | all remote heads known locally |
|
630 | invalid branchheads cache (served): tip differs | |
|
626 | 631 | listing keys for "bookmarks" |
|
627 | 632 | 3 changesets found |
|
628 | 633 | list of changesets: |
@@ -695,6 +700,7 b' barney is allowed everywhere' | |||
|
695 | 700 | query 1; heads |
|
696 | 701 | searching for changes |
|
697 | 702 | all remote heads known locally |
|
703 | invalid branchheads cache (served): tip differs | |
|
698 | 704 | listing keys for "bookmarks" |
|
699 | 705 | 3 changesets found |
|
700 | 706 | list of changesets: |
@@ -773,7 +779,7 b' wilma can change files with a .txt exten' | |||
|
773 | 779 | query 1; heads |
|
774 | 780 | searching for changes |
|
775 | 781 | all remote heads known locally |
|
776 | invalidating branch cache (tip differs) | |

782 | invalid branchheads cache (served): tip differs | |
|
777 | 783 | listing keys for "bookmarks" |
|
778 | 784 | 3 changesets found |
|
779 | 785 | list of changesets: |
@@ -853,6 +859,7 b' file specified by acl.config does not ex' | |||
|
853 | 859 | query 1; heads |
|
854 | 860 | searching for changes |
|
855 | 861 | all remote heads known locally |
|
862 | invalid branchheads cache (served): tip differs | |
|
856 | 863 | listing keys for "bookmarks" |
|
857 | 864 | 3 changesets found |
|
858 | 865 | list of changesets: |
@@ -927,6 +934,7 b' betty is allowed inside foo/ by a acl.co' | |||
|
927 | 934 | query 1; heads |
|
928 | 935 | searching for changes |
|
929 | 936 | all remote heads known locally |
|
937 | invalid branchheads cache (served): tip differs | |
|
930 | 938 | listing keys for "bookmarks" |
|
931 | 939 | 3 changesets found |
|
932 | 940 | list of changesets: |
@@ -1012,6 +1020,7 b' acl.config can set only [acl.allow]/[acl' | |||
|
1012 | 1020 | query 1; heads |
|
1013 | 1021 | searching for changes |
|
1014 | 1022 | all remote heads known locally |
|
1023 | invalid branchheads cache (served): tip differs | |
|
1015 | 1024 | listing keys for "bookmarks" |
|
1016 | 1025 | 3 changesets found |
|
1017 | 1026 | list of changesets: |
@@ -1091,7 +1100,7 b' fred is always allowed' | |||
|
1091 | 1100 | query 1; heads |
|
1092 | 1101 | searching for changes |
|
1093 | 1102 | all remote heads known locally |
|
1094 | invalidating branch cache (tip differs) | |

1103 | invalid branchheads cache (served): tip differs | |
|
1095 | 1104 | listing keys for "bookmarks" |
|
1096 | 1105 | 3 changesets found |
|
1097 | 1106 | list of changesets: |
@@ -1167,7 +1176,7 b' no one is allowed inside foo/Bar/' | |||
|
1167 | 1176 | query 1; heads |
|
1168 | 1177 | searching for changes |
|
1169 | 1178 | all remote heads known locally |
|
1170 | invalidating branch cache (tip differs) | |

1179 | invalid branchheads cache (served): tip differs | |
|
1171 | 1180 | listing keys for "bookmarks" |
|
1172 | 1181 | 3 changesets found |
|
1173 | 1182 | list of changesets: |
@@ -1243,6 +1252,7 b' OS-level groups' | |||
|
1243 | 1252 | query 1; heads |
|
1244 | 1253 | searching for changes |
|
1245 | 1254 | all remote heads known locally |
|
1255 | invalid branchheads cache (served): tip differs | |
|
1246 | 1256 | listing keys for "bookmarks" |
|
1247 | 1257 | 3 changesets found |
|
1248 | 1258 | list of changesets: |
@@ -1319,7 +1329,7 b' OS-level groups' | |||
|
1319 | 1329 | query 1; heads |
|
1320 | 1330 | searching for changes |
|
1321 | 1331 | all remote heads known locally |
|
1322 | invalidating branch cache (tip differs) | |

1332 | invalid branchheads cache (served): tip differs | |
|
1323 | 1333 | listing keys for "bookmarks" |
|
1324 | 1334 | 3 changesets found |
|
1325 | 1335 | list of changesets: |
@@ -1517,7 +1527,6 b' Branch acl deny test' | |||
|
1517 | 1527 | query 1; heads |
|
1518 | 1528 | searching for changes |
|
1519 | 1529 | all remote heads known locally |
|
1520 | invalidating branch cache (tip differs) | |
|
1521 | 1530 | listing keys for "bookmarks" |
|
1522 | 1531 | 4 changesets found |
|
1523 | 1532 | list of changesets: |
@@ -1829,7 +1838,6 b' push foobar into the remote' | |||
|
1829 | 1838 | query 1; heads |
|
1830 | 1839 | searching for changes |
|
1831 | 1840 | all remote heads known locally |
|
1832 | invalidating branch cache (tip differs) | |
|
1833 | 1841 | listing keys for "bookmarks" |
|
1834 | 1842 | 4 changesets found |
|
1835 | 1843 | list of changesets: |
@@ -1917,7 +1925,6 b' Branch acl conflicting deny' | |||
|
1917 | 1925 | query 1; heads |
|
1918 | 1926 | searching for changes |
|
1919 | 1927 | all remote heads known locally |
|
1920 | invalidating branch cache (tip differs) | |
|
1921 | 1928 | listing keys for "bookmarks" |
|
1922 | 1929 | 4 changesets found |
|
1923 | 1930 | list of changesets: |
@@ -2073,7 +2080,6 b' Non-astro users must be denied' | |||
|
2073 | 2080 | query 1; heads |
|
2074 | 2081 | searching for changes |
|
2075 | 2082 | all remote heads known locally |
|
2076 | invalidating branch cache (tip differs) | |
|
2077 | 2083 | listing keys for "bookmarks" |
|
2078 | 2084 | 4 changesets found |
|
2079 | 2085 | list of changesets: |
@@ -86,7 +86,7 b' attack /tmp/test' | |||
|
86 | 86 | $ hg manifest -r4 |
|
87 | 87 | /tmp/test |
|
88 | 88 | $ hg update -Cr4 |
|
89 | abort: *: $TESTTMP/target//tmp/test (glob) | |
|
89 | abort: path contains illegal component: /tmp/test | |
|
90 | 90 | [255] |
|
91 | 91 | |
|
92 | 92 | $ cd .. |
@@ -222,21 +222,21 b' mark revsets instead of single revs' | |||
|
222 | 222 | Testing changeset 12:1941b52820a5 (23 changesets remaining, ~4 tests) |
|
223 | 223 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
224 | 224 | $ cat .hg/bisect.state |
|
225 | current 1941b52820a544549596820a8ae006842b0e2c64 | |
|
226 | skip 9d7d07bc967ca98ad0600c24953fd289ad5fa991 | |
|
227 | skip ce8f0998e922c179e80819d5066fbe46e2998784 | |
|
228 | skip e7fa0811edb063f6319531f0d0a865882138e180 | |
|
229 | skip a2e6ea4973e9196ddd3386493b0c214b41fd97d3 | |
|
230 | 225 | bad b99c7b9c8e11558adef3fad9af211c58d46f325b |
|
231 | 226 | bad 5cd978ea51499179507ee7b6f340d2dbaa401185 |
|
232 | 227 | bad db07c04beaca44cf24832541e7f4a2346a95275b |
|
233 | 228 | bad b53bea5e2fcb30d3e00bd3409507a5659ce0fd8b |
|
229 | current 1941b52820a544549596820a8ae006842b0e2c64 | |
|
234 | 230 | good 3efc6fd51aeb8594398044c6c846ca59ae021203 |
|
235 | 231 | good 288867a866e9adb7a29880b66936c874b80f4651 |
|
236 | 232 | good 8e0c2264c8af790daf3585ada0669d93dee09c83 |
|
237 | 233 | good b5bd63375ab9a290419f2024b7f4ee9ea7ce90a8 |
|
238 | 234 | good ed2d2f24b11c368fa8aa0da9f4e1db580abade59 |
|
239 | 235 | good 58c80a7c8a4025a94cedaf7b4a4e3124e8909a96 |
|
236 | skip 9d7d07bc967ca98ad0600c24953fd289ad5fa991 | |
|
237 | skip ce8f0998e922c179e80819d5066fbe46e2998784 | |
|
238 | skip e7fa0811edb063f6319531f0d0a865882138e180 | |
|
239 | skip a2e6ea4973e9196ddd3386493b0c214b41fd97d3 | |
|
240 | 240 | |
|
241 | 241 | bisect reverse test |
|
242 | 242 |
@@ -41,8 +41,8 b' import bookmark by name' | |||
|
41 | 41 | adding manifests |
|
42 | 42 | adding file changes |
|
43 | 43 | added 1 changesets with 1 changes to 1 files |
|
44 | adding remote bookmark X | |
|
44 | 45 | updating bookmark Y |
|
45 | adding remote bookmark X | |
|
46 | 46 | adding remote bookmark Z |
|
47 | 47 | (run 'hg update' to get a working copy) |
|
48 | 48 | $ hg bookmarks |
@@ -51,12 +51,12 b' import bookmark by name' | |||
|
51 | 51 | Z 0:4e3505fd9583 |
|
52 | 52 | $ hg debugpushkey ../a namespaces |
|
53 | 53 | bookmarks |
|
54 | phases | |
|
55 | 54 | namespaces |
|
56 | 55 | obsolete |
|
56 | phases | |
|
57 | 57 | $ hg debugpushkey ../a bookmarks |
|
58 | X 4e3505fd95835d721066b76e75dbb8cc554d7f77 | |
|
58 | 59 | Y 4e3505fd95835d721066b76e75dbb8cc554d7f77 |
|
59 | X 4e3505fd95835d721066b76e75dbb8cc554d7f77 | |
|
60 | 60 | Z 4e3505fd95835d721066b76e75dbb8cc554d7f77 |
|
61 | 61 | $ hg pull -B X ../a |
|
62 | 62 | pulling from ../a |
@@ -145,9 +145,9 b' divergent bookmarks' | |||
|
145 | 145 | adding manifests |
|
146 | 146 | adding file changes |
|
147 | 147 | added 1 changesets with 1 changes to 1 files (+1 heads) |
|
148 | divergent bookmark @ stored as @foo | |
|
148 | 149 | divergent bookmark X stored as X@foo |
|
149 | 150 | updating bookmark Z |
|
150 | divergent bookmark @ stored as @foo | |
|
151 | 151 | (run 'hg heads' to see heads, 'hg merge' to merge) |
|
152 | 152 | $ hg book |
|
153 | 153 | @ 1:9b140be10808 |
@@ -292,16 +292,16 b' hgweb' | |||
|
292 | 292 | |
|
293 | 293 | $ hg debugpushkey http://localhost:$HGPORT/ namespaces |
|
294 | 294 | bookmarks |
|
295 | phases | |
|
296 | 295 | namespaces |
|
297 | 296 | obsolete |
|
297 | phases | |
|
298 | 298 | $ hg debugpushkey http://localhost:$HGPORT/ bookmarks |
|
299 | 299 | @ 9b140be1080824d768c5a4691a564088eede71f9 |
|
300 | X 9b140be1080824d768c5a4691a564088eede71f9 | |
|
301 | Y c922c0139ca03858f655e4a2af4dd02796a63969 | |
|
302 | Z 0d2164f0ce0d8f1d6f94351eba04b794909be66c | |
|
300 | 303 | foo 0000000000000000000000000000000000000000 |
|
301 | 304 | foobar 9b140be1080824d768c5a4691a564088eede71f9 |
|
302 | Y c922c0139ca03858f655e4a2af4dd02796a63969 | |
|
303 | X 9b140be1080824d768c5a4691a564088eede71f9 | |
|
304 | Z 0d2164f0ce0d8f1d6f94351eba04b794909be66c | |
|
305 | 305 | $ hg out -B http://localhost:$HGPORT/ |
|
306 | 306 | comparing with http://localhost:$HGPORT/ |
|
307 | 307 | searching for changed bookmarks |
@@ -324,10 +324,10 b' hgweb' | |||
|
324 | 324 | pulling from http://localhost:$HGPORT/ |
|
325 | 325 | no changes found |
|
326 | 326 | divergent bookmark @ stored as @1 |
|
327 | divergent bookmark X stored as X@1 | |
|
328 | adding remote bookmark Z | |
|
327 | 329 | adding remote bookmark foo |
|
328 | 330 | adding remote bookmark foobar |
|
329 | divergent bookmark X stored as X@1 | |
|
330 | adding remote bookmark Z | |
|
331 | 331 | importing bookmark Z |
|
332 | 332 | $ hg clone http://localhost:$HGPORT/ cloned-bookmarks |
|
333 | 333 | requesting all changes |
@@ -40,9 +40,9 b' look up bookmark' | |||
|
40 | 40 | summary: 0 |
|
41 | 41 | |
|
42 | 42 | |
|
43 | second bookmark for rev 0 | |
|
43 | second bookmark for rev 0, command should work even with ui.strict on | |
|
44 | 44 | |
|
45 | $ hg bookmark X2 | |

45 | $ hg --config ui.strict=1 bookmark X2 | |
|
46 | 46 | |
|
47 | 47 | bookmark rev -1 again |
|
48 | 48 |
@@ -444,6 +444,33 b' Unbundle incremental bundles into fresh ' | |||
|
444 | 444 | added 1 changesets with 1 changes to 1 files |
|
445 | 445 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
446 | 446 | |
|
447 | View full contents of the bundle | |
|
448 | $ hg -R test bundle --base null -r 3 ../partial.hg | |
|
449 | 4 changesets found | |
|
450 | $ cd test | |
|
451 | $ hg -R ../../partial.hg log -r "bundle()" | |
|
452 | changeset: 0:f9ee2f85a263 | |
|
453 | user: test | |
|
454 | date: Thu Jan 01 00:00:00 1970 +0000 | |
|
455 | summary: 0.0 | |
|
456 | ||
|
457 | changeset: 1:34c2bf6b0626 | |
|
458 | user: test | |
|
459 | date: Thu Jan 01 00:00:00 1970 +0000 | |
|
460 | summary: 0.1 | |
|
461 | ||
|
462 | changeset: 2:e38ba6f5b7e0 | |
|
463 | user: test | |
|
464 | date: Thu Jan 01 00:00:00 1970 +0000 | |
|
465 | summary: 0.2 | |
|
466 | ||
|
467 | changeset: 3:eebf5a27f8ca | |
|
468 | user: test | |
|
469 | date: Thu Jan 01 00:00:00 1970 +0000 | |
|
470 | summary: 0.3 | |
|
471 | ||
|
472 | $ cd .. | |
|
473 | ||
|
447 | 474 | test for 540d1059c802 |
|
448 | 475 | |
|
449 | 476 | test for 540d1059c802 |
@@ -5,163 +5,7 b'' | |||
|
5 | 5 | > echo "skipped: not a Mercurial working dir" >&2 |
|
6 | 6 | > exit 80 |
|
7 | 7 | > fi |
|
8 | $ hg manifest | xargs "$check_code" || echo 'FAILURE IS NOT AN OPTION!!!' | |
|
9 | 8 | |
|
10 | $ hg manifest | xargs "$check_code" --warnings --nolineno --per-file=0 || true | |
|
11 | hgext/convert/cvsps.py:0: | |
|
12 | > ui.write('Ancestors: %s\n' % (','.join(r))) | |
|
13 | warning: unwrapped ui message | |
|
14 | hgext/convert/cvsps.py:0: | |
|
15 | > ui.write('Parent: %d\n' % cs.parents[0].id) | |
|
16 | warning: unwrapped ui message | |
|
17 | hgext/convert/cvsps.py:0: | |
|
18 | > ui.write('Parents: %s\n' % | |
|
19 | warning: unwrapped ui message | |
|
20 | hgext/convert/cvsps.py:0: | |
|
21 | > ui.write('Branchpoints: %s \n' % ', '.join(branchpoints)) | |
|
22 | warning: unwrapped ui message | |
|
23 | hgext/convert/cvsps.py:0: | |
|
24 | > ui.write('Author: %s\n' % cs.author) | |
|
25 | warning: unwrapped ui message | |
|
26 | hgext/convert/cvsps.py:0: | |
|
27 | > ui.write('Branch: %s\n' % (cs.branch or 'HEAD')) | |
|
28 | warning: unwrapped ui message | |
|
29 | hgext/convert/cvsps.py:0: | |
|
30 | > ui.write('Date: %s\n' % util.datestr(cs.date, | |
|
31 | warning: unwrapped ui message | |
|
32 | hgext/convert/cvsps.py:0: | |
|
33 | > ui.write('Log:\n') | |
|
34 | warning: unwrapped ui message | |
|
35 | hgext/convert/cvsps.py:0: | |
|
36 | > ui.write('Members: \n') | |
|
37 | warning: unwrapped ui message | |
|
38 | hgext/convert/cvsps.py:0: | |
|
39 | > ui.write('PatchSet %d \n' % cs.id) | |
|
40 | warning: unwrapped ui message | |
|
41 | hgext/convert/cvsps.py:0: | |
|
42 | > ui.write('Tag%s: %s \n' % (['', 's'][len(cs.tags) > 1], | |
|
43 | warning: unwrapped ui message | |
|
44 | hgext/hgk.py:0: | |
|
45 | > ui.write("parent %s\n" % p) | |
|
46 | warning: unwrapped ui message | |
|
47 | hgext/hgk.py:0: | |
|
48 | > ui.write('k=%s\nv=%s\n' % (name, value)) | |
|
49 | warning: unwrapped ui message | |
|
50 | hgext/hgk.py:0: | |
|
51 | > ui.write("author %s %s %s\n" % (ctx.user(), int(date[0]), date[1])) | |
|
52 | warning: unwrapped ui message | |
|
53 | hgext/hgk.py:0: | |
|
54 | > ui.write("branch %s\n\n" % ctx.branch()) | |
|
55 | warning: unwrapped ui message | |
|
56 | hgext/hgk.py:0: | |
|
57 | > ui.write("committer %s %s %s\n" % (committer, int(date[0]), date[1])) | |
|
58 | warning: unwrapped ui message | |
|
59 | hgext/hgk.py:0: | |
|
60 | > ui.write("revision %d\n" % ctx.rev()) | |
|
61 | warning: unwrapped ui message | |
|
62 | hgext/hgk.py:0: | |
|
63 | > ui.write("tree %s\n" % short(ctx.changeset()[0])) | |
|
64 | warning: unwrapped ui message | |
|
65 | hgext/patchbomb.py:0: | |
|
66 | > ui.write('Subject: %s\n' % subj) | |
|
67 | warning: unwrapped ui message | |
|
68 | hgext/patchbomb.py:0: | |
|
69 | > ui.write('From: %s\n' % sender) | |
|
70 | warning: unwrapped ui message | |
|
71 | mercurial/commands.py:0: | |
|
72 | > ui.note('branch %s\n' % data) | |
|
73 | warning: unwrapped ui message | |
|
74 | mercurial/commands.py:0: | |
|
75 | > ui.note('node %s\n' % str(data)) | |
|
76 | warning: unwrapped ui message | |
|
77 | mercurial/commands.py:0: | |
|
78 | > ui.note('tag %s\n' % name) | |
|
79 | warning: unwrapped ui message | |
|
80 | mercurial/commands.py:0: | |
|
81 | > ui.write("unpruned common: %s\n" % " ".join([short(n) | |
|
82 | warning: unwrapped ui message | |
|
83 | mercurial/commands.py:0: | |
|
84 | > ui.write("format: id, p1, p2, cset, delta base, len(delta)\n") | |
|
85 | warning: unwrapped ui message | |
|
86 | mercurial/commands.py:0: | |
|
87 | > ui.write("local is subset\n") | |
|
88 | warning: unwrapped ui message | |
|
89 | mercurial/commands.py:0: | |
|
90 | > ui.write("remote is subset\n") | |
|
91 | warning: unwrapped ui message | |
|
92 | mercurial/commands.py:0: | |
|
93 | > ui.write('deltas against other : ' + fmt % pcfmt(numother, | |
|
94 | warning: unwrapped ui message | |
|
95 | mercurial/commands.py:0: | |
|
96 | > ui.write('deltas against p1 : ' + fmt % pcfmt(nump1, numdeltas)) | |
|
97 | warning: unwrapped ui message | |
|
98 | mercurial/commands.py:0: | |
|
99 | > ui.write('deltas against p2 : ' + fmt % pcfmt(nump2, numdeltas)) | |
|
100 | warning: unwrapped ui message | |
|
101 | mercurial/commands.py:0: | |
|
102 | > ui.write("common heads: %s\n" % " ".join([short(n) for n in common])) | |
|
103 | warning: unwrapped ui message | |
|
104 | mercurial/commands.py:0: | |
|
105 | > ui.write("match: %s\n" % m(d[0])) | |
|
106 | warning: unwrapped ui message | |
|
107 | mercurial/commands.py:0: | |
|
108 | > ui.write('deltas against prev : ' + fmt % pcfmt(numprev, numdeltas)) | |
|
109 | warning: unwrapped ui message | |
|
110 | mercurial/commands.py:0: | |
|
111 | > ui.write('path %s\n' % k) | |
|
112 | warning: unwrapped ui message | |
|
113 | mercurial/commands.py:0: | |
|
114 | > ui.write('uncompressed data size (min/max/avg) : %d / %d / %d\n' | |
|
115 | warning: unwrapped ui message | |
|
116 | mercurial/commands.py:0: | |
|
117 | > ui.write("digraph G {\n") | |
|
118 | warning: unwrapped ui message | |
|
119 | mercurial/commands.py:0: | |
|
120 | > ui.write("internal: %s %s\n" % d) | |
|
121 | warning: unwrapped ui message | |
|
122 | mercurial/commands.py:0: | |
|
123 | > ui.write("standard: %s\n" % util.datestr(d)) | |
|
124 | warning: unwrapped ui message | |
|
125 | mercurial/commands.py:0: | |
|
126 | > ui.write('avg chain length : ' + fmt % avgchainlen) | |
|
127 | warning: unwrapped ui message | |
|
128 | mercurial/commands.py:0: | |
|
129 | > ui.write('case-sensitive: %s\n' % (util.checkcase('.debugfsinfo') | |
|
130 | warning: unwrapped ui message | |
|
131 | mercurial/commands.py:0: | |
|
132 | > ui.write('compression ratio : ' + fmt % compratio) | |
|
133 | warning: unwrapped ui message | |
|
134 | mercurial/commands.py:0: | |
|
135 | > ui.write('delta size (min/max/avg) : %d / %d / %d\n' | |
|
136 | warning: unwrapped ui message | |
|
137 | mercurial/commands.py:0: | |
|
138 | > ui.write('exec: %s\n' % (util.checkexec(path) and 'yes' or 'no')) | |
|
139 | warning: unwrapped ui message | |
|
140 | mercurial/commands.py:0: | |
|
141 | > ui.write('flags : %s\n' % ', '.join(flags)) | |
|
142 | warning: unwrapped ui message | |
|
143 | mercurial/commands.py:0: | |
|
144 | > ui.write('format : %d\n' % format) | |
|
145 | warning: unwrapped ui message | |
|
146 | mercurial/commands.py:0: | |
|
147 | > ui.write('full revision size (min/max/avg) : %d / %d / %d\n' | |
|
148 | warning: unwrapped ui message | |
|
149 | mercurial/commands.py:0: | |
|
150 | > ui.write('revision size : ' + fmt2 % totalsize) | |
|
151 | warning: unwrapped ui message | |
|
152 | mercurial/commands.py:0: | |
|
153 | > ui.write('revisions : ' + fmt2 % numrevs) | |
|
154 | warning: unwrapped ui message | |
|
155 | warning: unwrapped ui message | |
|
156 | mercurial/commands.py:0: | |
|
157 | > ui.write('symlink: %s\n' % (util.checklink(path) and 'yes' or 'no')) | |
|
158 | warning: unwrapped ui message | |
|
159 | tests/autodiff.py:0: | |
|
160 | > ui.write('data lost for: %s\n' % fn) | |
|
161 | warning: unwrapped ui message | |
|
162 | tests/test-ui-color.py:0: | |
|
163 | > testui.warn('warning\n') | |
|
164 | warning: unwrapped ui message | |
|
165 | tests/test-ui-color.py:0: | |
|
166 | > testui.write('buffered\n') | |
|
167 | warning: unwrapped ui message | |
|
9 | New errors are not allowed. Warnings are strongly discouraged. | |
|
10 | ||
|
11 | $ hg manifest | xargs "$check_code" --warnings --nolineno --per-file=0 |
@@ -110,6 +110,18 b'' | |||
|
110 | 110 | > class empty(): |
|
111 | 111 | class foo() not available in Python 2.4, use class foo(object) |
|
112 | 112 | [1] |
|
113 | $ cat > python3-compat.py << EOF | |
|
114 | > foo <> bar | |
|
115 | > reduce(lambda a, b: a + b, [1, 2, 3, 4]) | |
|
116 | > EOF | |
|
117 | $ "$check_code" python3-compat.py | |
|
118 | python3-compat.py:1: | |
|
119 | > foo <> bar | |
|
120 | <> operator is not available in Python 3+, use != | |
|
121 | python3-compat.py:2: | |
|
122 | > reduce(lambda a, b: a + b, [1, 2, 3, 4]) | |
|
123 | reduce is not available in Python 3+ | |
|
124 | [1] | |
|
113 | 125 | |
|
114 | 126 | $ cat > is-op.py <<EOF |
|
115 | 127 | > # is-operator comparing number or string literal |
@@ -159,3 +171,14 b'' | |||
|
159 | 171 | > except: |
|
160 | 172 | warning: naked except clause |
|
161 | 173 | [1] |
|
174 | ||
|
175 | $ cat > raise-format.py <<EOF | |
|
176 | > raise SomeException, message | |
|
177 | > # this next line is okay | |
|
178 | > raise SomeException(arg1, arg2) | |
|
179 | > EOF | |
|
180 | $ "$check_code" raise-format.py | |
|
181 | raise-format.py:1: | |
|
182 | > raise SomeException, message | |
|
183 | don't use old-style two-argument raise, use Exception(message) | |
|
184 | [1] |
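
The three new check-code rules above push toward the standard Python 3 spellings: inequality written as !=, reduce taken from functools, and exceptions constructed rather than raised with the old two-argument form. A sketch of the compliant code in the same style as the tests above (file name illustrative, not part of the suite):

    $ cat > python3-ok.py << EOF
    > import functools
    > if 1 != 2:
    >     pass
    > total = functools.reduce(lambda a, b: a + b, [1, 2, 3, 4])
    > raise ValueError('total was %d' % total)
    > EOF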
@@ -37,16 +37,16 b' churn separate directories' | |||
|
37 | 37 | churn all |
|
38 | 38 | |
|
39 | 39 | $ hg churn |
|
40 | user1 3 *************************************************************** | |
|
40 | 41 | user3 3 *************************************************************** |
|
41 | user1 3 *************************************************************** | |
|
42 | 42 | user2 2 ****************************************** |
|
43 | 43 | |
|
44 | 44 | churn excluding one dir |
|
45 | 45 | |
|
46 | 46 | $ hg churn -X e |
|
47 | 47 | user3 3 *************************************************************** |
|
48 | user1 2 ****************************************** | |
|
48 | 49 | user2 2 ****************************************** |
|
49 | user1 2 ****************************************** | |
|
50 | 50 | |
|
51 | 51 | churn up to rev 2 |
|
52 | 52 | |
@@ -68,16 +68,16 b' churn with .hgchurn' | |||
|
68 | 68 | $ mv ../aliases .hgchurn |
|
69 | 69 | $ hg churn |
|
70 | 70 | skipping malformed alias: not-an-alias |
|
71 | alias1 3 ************************************************************** | |
|
71 | 72 | alias3 3 ************************************************************** |
|
72 | alias1 3 ************************************************************** | |
|
73 | 73 | user2 2 ***************************************** |
|
74 | 74 | $ rm .hgchurn |
|
75 | 75 | |
|
76 | 76 | churn with column specifier |
|
77 | 77 | |
|
78 | 78 | $ COLUMNS=40 hg churn |
|
79 | user1 3 *********************** | |
|
79 | 80 | user3 3 *********************** |
|
80 | user1 3 *********************** | |
|
81 | 81 | user2 2 *************** |
|
82 | 82 | |
|
83 | 83 | churn by hour |
@@ -155,8 +155,8 b' Ignore trailing or leading spaces in ema' | |||
|
155 | 155 | $ hg churn -c |
|
156 | 156 | user1 4 ********************************************************* |
|
157 | 157 | user3 3 ******************************************* |
|
158 | user2 2 ***************************** | |
|
158 | 159 | user4@x.com 2 ***************************** |
|
159 | user2 2 ***************************** | |
|
160 | 160 | with space 1 ************** |
|
161 | 161 | |
|
162 | 162 | $ cd .. |
@@ -558,7 +558,7 b' Inaccessible destination' | |||
|
558 | 558 | $ hg init b |
|
559 | 559 | $ cd b |
|
560 | 560 | $ hg clone . ../a |
|
561 | abort: Permission denied: ../a | |
|
561 | abort: Permission denied: '../a' | |
|
562 | 562 | [255] |
|
563 | 563 | $ cd .. |
|
564 | 564 | $ chmod 700 a |
@@ -75,6 +75,7 b' defaults.backout=-d "0 0"' | |||
|
75 | 75 | defaults.commit=-d "0 0" |
|
76 | 76 | defaults.tag=-d "0 0" |
|
77 | 77 | ui.slash=True |
|
78 | ui.interactive=False | |
|
78 | 79 | ui.foo=bar |
|
79 | 80 | runcommand init foo |
|
80 | 81 | runcommand -R foo showconfig ui defaults |
@@ -82,6 +83,7 b' defaults.backout=-d "0 0"' | |||
|
82 | 83 | defaults.commit=-d "0 0" |
|
83 | 84 | defaults.tag=-d "0 0" |
|
84 | 85 | ui.slash=True |
|
86 | ui.interactive=False | |
|
85 | 87 | |
|
86 | 88 | testing hookoutput: |
|
87 | 89 |
@@ -488,14 +488,33 b' ride of)' | |||
|
488 | 488 | | | |
|
489 | 489 | |
|
490 | 490 | |
|
491 | Test that amend does not make it easy to create obsole | |

491 | Test that amend does not make it easy to create obsolescence cycle | |
|
492 | 492 | --------------------------------------------------------------------- |
|
493 | 493 | |
|
494 | ||
|
495 | $ hg id -r 14 | |
|
494 | $ hg id -r 14 --hidden | |
|
496 | 495 | b650e6ee8614 (a) |
|
497 | $ hg revert -ar 14 | |
|
496 | $ hg revert -ar 14 --hidden | |
|
498 | 497 | reverting a |
|
499 | 498 | $ hg commit --amend |
|
500 | 499 | $ hg id |
|
501 | 500 | b99e5df575f7 (a) tip |
|
501 | ||
|
502 | Test that rewriting leaving instability behind is allowed | |
|
503 | --------------------------------------------------------------------- | |
|
504 | ||
|
505 | $ hg up '.^' | |
|
506 | 0 files updated, 0 files merged, 1 files removed, 0 files unresolved | |
|
507 | $ echo 'b' >> a | |
|
508 | $ hg log --style compact -r 'children(.)' | |
|
509 | 18[tip]:11 b99e5df575f7 1970-01-01 00:00 +0000 test | |
|
510 | babar | |
|
511 | ||
|
512 | $ hg commit --amend | |
|
513 | $ hg log -r 'unstable()' | |
|
514 | changeset: 18:b99e5df575f7 | |
|
515 | branch: a | |
|
516 | parent: 11:3334b7925910 | |
|
517 | user: test | |
|
518 | date: Thu Jan 01 00:00:00 1970 +0000 | |
|
519 | summary: babar | |
|
520 |
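
The block added above leans on two pieces of the obsolescence machinery: --hidden makes obsolete (hidden) changesets addressable again, and the unstable() revset selects non-obsolete changesets sitting on top of obsolete ancestors, which is exactly what amending a revision that has descendants leaves behind. A minimal illustration, assuming a repository where '.' has a child:

    $ hg commit --amend          # rewrites '.', marking the old node obsolete
    $ hg log -r 'unstable()'     # lists descendants stranded on the old node

(unstable() was later renamed orphan(); the behaviour exercised here is the same.)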
@@ -82,7 +82,7 b' incremental conversion' | |||
|
82 | 82 | pulling from branch0 into branch2 |
|
83 | 83 | 4 changesets found |
|
84 | 84 | 0 c3 |
|
85 | pulling from branch | |

85 | pulling from branch1 into branch3 | |
|
86 | 86 | 5 changesets found |
|
87 | pulling from branch | |

87 | pulling from branch2 into branch3 | |
|
88 | 88 | 1 changesets found |
@@ -183,8 +183,8 b' convert to hg' | |||
|
183 | 183 | sorting... |
|
184 | 184 | converting... |
|
185 | 185 | 9 add file1 on trunk |
|
186 | 8 add text | |
|
187 | 7 unrelated change | |
|
186 | 8 unrelated change | |
|
187 | 7 add text | |
|
188 | 188 | 6 add text [MERGE from v1_0] |
|
189 | 189 | 5 add text [MERGE from v1_1] |
|
190 | 190 | 4 add file2 on trunk |
@@ -204,8 +204,8 b' complete log' | |||
|
204 | 204 | 5: '' add file2 on trunk |
|
205 | 205 | 4: '' add text [MERGE from v1_1] |
|
206 | 206 | 3: 'v1_1' add text [MERGE from v1_0] |
|
207 | 2: 'v1_1' unrelated change | |
|
208 | 1: 'v1_0' add text | |

207 | 2: 'v1_0' add text | |
|
208 | 1: 'v1_1' unrelated change | |
|
209 | 209 | 0: '' add file1 on trunk |
|
210 | 210 | |
|
211 | 211 | graphical log |
@@ -225,9 +225,9 b' graphical log' | |||
|
225 | 225 | |\| |
|
226 | 226 | | o 3: 'v1_1' add text [MERGE from v1_0] |
|
227 | 227 | | |\ |
|
228 | +---o 2: 'v1_1' unrelated change | |

228 | +---o 2: 'v1_0' add text | |
|
229 | 229 | | | |
|
230 | | o 1: 'v1_0' add text | |

230 | | o 1: 'v1_1' unrelated change | |
|
231 | 231 | |/ |
|
232 | 232 | o 0: '' add file1 on trunk |
|
233 | 233 |
@@ -140,32 +140,35 b' convert to hg (#1)' | |||
|
140 | 140 | collecting CVS rlog |
|
141 | 141 | 15 log entries |
|
142 | 142 | creating changesets |
|
143 | 8 changeset entries | |

143 | 9 changeset entries | |
|
144 | 144 | sorting... |
|
145 | 145 | converting... |
|
146 | 7 add file1 on trunk | |

147 | 6 add file2 | |

148 | 5 add file3, file4 on branch v1_1 | |
|
149 | 4 MERGE from v1_0: add file2 | |
|
146 | 8 add file1 on trunk | |
|
147 | 7 add file2 | |
|
148 | 6 MERGE from v1_0: add file2 | |
|
149 | 5 file file3 was initially added on branch v1_1. | |
|
150 | 4 add file3, file4 on branch v1_1 | |
|
150 | 151 | 3 add file5 on v1_2 |
|
151 | 152 | 2 add file6 on trunk post-v1_2 |
|
152 | 1 MERGE from v1_2: add file5 | |

153 | 0 MERGE from HEAD: add file6 | |

153 | 1 MERGE from HEAD: add file6 | |
|
154 | 0 MERGE from v1_2: add file5 | |
|
154 | 155 | |
|
155 | 156 | hg glog output (#1) |
|
156 | 157 | |
|
157 | 158 | $ hg -R proj.hg glog --template "{rev} {desc}\n" |
|
158 | o 7 MERGE from HEAD: add file6 | |

159 | o 8 MERGE from v1_2: add file5 | |
|
159 | 160 | | |
|
160 | | o 6 MERGE from v1_2: add file5 | |

161 | | o 7 MERGE from HEAD: add file6 | |
|
161 | 162 | | | |
|
162 | | o 5 add file6 on trunk post-v1_2 | |

163 | o | 6 add file6 on trunk post-v1_2 | |
|
163 | 164 | | | |
|
164 | o | 4 add file5 on v1_2 | |

165 | |/ | |
|
166 | | o 3 MERGE from v1_0: add file2 | |
|
165 | | o 5 add file5 on v1_2 | |
|
167 | 166 | | | |
|
168 | | o 2 add file3, file4 on branch v1_1 | |

167 | | | o 4 add file3, file4 on branch v1_1 | |
|
168 | | | | | |
|
169 | o | | 3 file file3 was initially added on branch v1_1. | |
|
170 | |/ / | |
|
171 | | o 2 MERGE from v1_0: add file2 | |
|
169 | 172 | |/ |
|
170 | 173 | | o 1 add file2 |
|
171 | 174 | |/ |
@@ -184,32 +187,35 b' convert to hg (#2: with merge detection)' | |||
|
184 | 187 | collecting CVS rlog |
|
185 | 188 | 15 log entries |
|
186 | 189 | creating changesets |
|
187 | 8 changeset entries | |

190 | 9 changeset entries | |
|
188 | 191 | sorting... |
|
189 | 192 | converting... |
|
190 | 7 add file1 on trunk | |

191 | 6 add file2 | |

192 | 5 add file3, file4 on branch v1_1 | |
|
193 | 4 MERGE from v1_0: add file2 | |
|
193 | 8 add file1 on trunk | |
|
194 | 7 add file2 | |
|
195 | 6 MERGE from v1_0: add file2 | |
|
196 | 5 file file3 was initially added on branch v1_1. | |
|
197 | 4 add file3, file4 on branch v1_1 | |
|
194 | 198 | 3 add file5 on v1_2 |
|
195 | 199 | 2 add file6 on trunk post-v1_2 |
|
196 | 1 MERGE from v1_2: add file5 | |

197 | 0 MERGE from HEAD: add file6 | |

200 | 1 MERGE from HEAD: add file6 | |
|
201 | 0 MERGE from v1_2: add file5 | |
|
198 | 202 | |
|
199 | 203 | hg glog output (#2) |
|
200 | 204 | |
|
201 | 205 | $ hg -R proj.hg2 glog --template "{rev} {desc}\n" |
|
202 | o 7 MERGE from HEAD: add file6 | |

206 | o 8 MERGE from v1_2: add file5 | |
|
203 | 207 | | |
|
204 | | o 6 MERGE from v1_2: add file5 | |

208 | | o 7 MERGE from HEAD: add file6 | |
|
205 | 209 | | | |
|
206 | | o 5 add file6 on trunk post-v1_2 | |

210 | o | 6 add file6 on trunk post-v1_2 | |
|
207 | 211 | | | |
|
208 | o | 4 add file5 on v1_2 | |

209 | |/ | |
|
210 | | o 3 MERGE from v1_0: add file2 | |
|
212 | | o 5 add file5 on v1_2 | |
|
211 | 213 | | | |
|
212 | | o 2 add file3, file4 on branch v1_1 | |

214 | | | o 4 add file3, file4 on branch v1_1 | |
|
215 | | | | | |
|
216 | o | | 3 file file3 was initially added on branch v1_1. | |
|
217 | |/ / | |
|
218 | | o 2 MERGE from v1_0: add file2 | |
|
213 | 219 | |/ |
|
214 | 220 | | o 1 add file2 |
|
215 | 221 | |/ |
@@ -69,9 +69,16 b' commit a new revision changing b/c' | |||
|
69 | 69 | $TESTTMP/cvsrepo/src/b/c,v <-- *c (glob) |
|
70 | 70 | $ cd .. |
|
71 | 71 | |
|
72 | convert fresh repo | |
|
72 | convert fresh repo and also check localtimezone option | |
|
73 | ||
|
74 | NOTE: This doesn't check all time zones -- it merely determines that | |
|
75 | the configuration option is taking effect. | |
|
73 | 76 | |
|
74 | $ hg convert src src-hg | |
|
77 | An arbitrary (U.S.) time zone is used here. TZ=US/Hawaii is selected | |
|
78 | since it does not use DST (unlike other U.S. time zones) and is always | |
|
79 | a fixed difference from UTC. | |
|
80 | ||
|
81 | $ TZ=US/Hawaii hg convert --config convert.localtimezone=True src src-hg | |
|
75 | 82 | initializing destination src-hg repository |
|
76 | 83 | connecting to $TESTTMP/cvsrepo |
|
77 | 84 | scanning source... |
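
The TZ=US/Hawaii prefix introduced above is what makes the -1000 UTC offsets in the expected output deterministic: Hawaii observes no DST, so the zone is a fixed UTC-10:00 all year. A quick way to confirm that assumption outside the test:

    $ TZ=US/Hawaii date +%z
    -1000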
@@ -84,8 +91,8 b' convert fresh repo' | |||
|
84 | 91 | sorting... |
|
85 | 92 | converting... |
|
86 | 93 | 2 Initial revision |
|
87 | 1 import | |
|
88 | 0 ci0 | |
|
94 | 1 ci0 | |
|
95 | 0 import | |
|
89 | 96 | updating tags |
|
90 | 97 | $ hgcat a |
|
91 | 98 | a |
@@ -109,10 +116,10 b' convert fresh repo with --filemap' | |||
|
109 | 116 | sorting... |
|
110 | 117 | converting... |
|
111 | 118 | 2 Initial revision |
|
112 | 1 import | |
|
119 | 1 ci0 | |
|
120 | 0 import | |
|
113 | 121 | filtering out empty revision |
|
114 | repository tip rolled back to revision 0 (undo commit) | |

115 | 0 ci0 | |
|
122 | repository tip rolled back to revision 1 (undo commit) | |
|
116 | 123 | updating tags |
|
117 | 124 | $ hgcat b/c |
|
118 | 125 | c |
@@ -161,7 +168,7 b' commit new file revisions' | |||
|
161 | 168 | |
|
162 | 169 | convert again |
|
163 | 170 | |
|
164 | $ hg convert src src-hg | |
|
171 | $ TZ=US/Hawaii hg convert --config convert.localtimezone=True src src-hg | |
|
165 | 172 |
|
|
166 | 173 |
|
|
167 | 174 |
|
@@ -221,7 +228,7 b' commit branch' | |||
|
221 | 228 | |
|
222 | 229 | convert again |
|
223 | 230 | |
|
224 | $ hg convert src src-hg | |
|
231 | $ TZ=US/Hawaii hg convert --config convert.localtimezone=True src src-hg | |
|
225 | 232 |
|
|
226 | 233 |
|
|
227 | 234 |
|
@@ -239,7 +246,7 b' convert again' | |||
|
239 | 246 | |
|
240 | 247 | convert again with --filemap |
|
241 | 248 | |
|
242 | $ hg convert --filemap filemap src src-filemap | |

249 | $ TZ=US/Hawaii hg convert --config convert.localtimezone=True --filemap filemap src src-filemap | |
|
243 | 250 |
|
|
244 | 251 |
|
|
245 | 252 |
|
@@ -286,7 +293,7 b' commit new file revisions with some fuzz' | |||
|
286 | 293 | |
|
287 | 294 | convert again |
|
288 | 295 | |
|
289 | $ hg convert --config convert.cvsps.fuzz=2 src src-hg | |

296 | $ TZ=US/Hawaii hg convert --config convert.cvsps.fuzz=2 --config convert.localtimezone=True src src-hg | |
|
290 | 297 |
|
|
291 | 298 |
|
|
292 | 299 |
|
@@ -300,25 +307,25 b' convert again' | |||
|
300 | 307 |
|
|
301 | 308 |
|
|
302 | 309 |
|
|
303 |
|
|
|
304 | o 8 (branch) fuzzy files: b/c | |
|
310 | $ hg -R src-hg glog --template '{rev} ({branches}) {desc} date: {date|date} files: {files}\n' | |
|
311 | o 8 (branch) fuzzy date: * -1000 files: b/c (glob) | |
|
305 | 312 | | |
|
306 | o 7 (branch) fuzzy files: a | |
|
313 | o 7 (branch) fuzzy date: * -1000 files: a (glob) | |
|
307 | 314 | | |
|
308 | 315 | o 6 (branch) funny |
|
309 | 316 | | ---------------------------- |
|
310 | | log message files: a | |
|
311 | o 5 (branch) ci2 files: b/c | |
|
317 | | log message date: * -1000 files: a (glob) | |
|
318 | o 5 (branch) ci2 date: * -1000 files: b/c (glob) | |
|
312 | 319 |
|
|
313 | o 4 () ci1 files: a b/c | |
|
320 | o 4 () ci1 date: * -1000 files: a b/c (glob) | |
|
314 | 321 | | |
|
315 | o 3 () update tags files: .hgtags | |
|
316 | | | |
|
317 | o 2 () ci0 files: b/c | |
|
322 | o 3 () update tags date: * +0000 files: .hgtags (glob) | |
|
318 | 323 | | |
|
319 |
| o |
|
|
324 | | o 2 (INITIAL) import date: * -1000 files: (glob) | |
|
325 | | | | |
|
326 | o | 1 () ci0 date: * -1000 files: b/c (glob) | |
|
320 | 327 | |/ |
|
321 | o 0 () Initial revision files: a b/c | |
|
328 | o 0 () Initial revision date: * -1000 files: a b/c (glob) | |
|
322 | 329 |
|
|
323 | 330 | |
|
324 | 331 | testing debugcvsps |
@@ -388,12 +395,11 b' testing debugcvsps' | |||
|
388 | 395 | Author: * (glob) |
|
389 | 396 | Branch: HEAD |
|
390 | 397 | Tag: (none) |
|
391 | Branchpoints: branch | |
|
392 | 398 | Log: |
|
393 | 399 | ci1 |
|
394 | 400 |
|
|
395 | 401 | Members: |
|
396 |
|
|
|
402 | b/c:1.2->1.3 | |
|
397 | 403 |
|
|
398 | 404 | --------------------- |
|
399 | 405 | PatchSet 6 |
@@ -401,11 +407,12 b' testing debugcvsps' | |||
|
401 | 407 | Author: * (glob) |
|
402 | 408 | Branch: HEAD |
|
403 | 409 | Tag: (none) |
|
410 | Branchpoints: branch | |
|
404 | 411 | Log: |
|
405 | 412 | ci1 |
|
406 | 413 |
|
|
407 | 414 | Members: |
|
408 |
|
|
|
415 | a:1.1->1.2 | |
|
409 | 416 |
|
|
410 | 417 | --------------------- |
|
411 | 418 | PatchSet 7 |
@@ -116,7 +116,7 b' return to trunk and merge MYBRANCH1_2' | |||
|
116 | 116 | Author: user |
|
117 | 117 | Branch: HEAD |
|
118 | 118 | Tag: (none) |
|
119 | Branchpoints: MYBRANCH1 | |

119 | Branchpoints: MYBRANCH1, MYBRANCH1_1 | |
|
120 | 120 | Log: |
|
121 | 121 | foo.txt |
|
122 | 122 |
@@ -298,3 +298,50 b' damage git repository and convert again' | |||
|
298 | 298 | $ hg convert git-repo4 git-repo4-broken-hg 2>&1 | \ |
|
299 | 299 | > grep 'abort:' | sed 's/abort:.*/abort:/g' |
|
300 | 300 | abort: |
|
301 | ||
|
302 | test sub modules | |
|
303 | ||
|
304 | $ mkdir git-repo5 | |
|
305 | $ cd git-repo5 | |
|
306 | $ git init-db >/dev/null 2>/dev/null | |
|
307 | $ echo 'sub' >> foo | |
|
308 | $ git add foo | |
|
309 | $ commit -a -m 'addfoo' | |
|
310 | $ BASE=${PWD} | |
|
311 | $ cd .. | |
|
312 | $ mkdir git-repo6 | |
|
313 | $ cd git-repo6 | |
|
314 | $ git init-db >/dev/null 2>/dev/null | |
|
315 | $ git submodule add ${BASE} >/dev/null 2>/dev/null | |
|
316 | $ commit -a -m 'addsubmodule' >/dev/null 2>/dev/null | |
|
317 | $ cd .. | |
|
318 | ||
|
319 | convert sub modules | |
|
320 | $ hg convert git-repo6 git-repo6-hg | |
|
321 | initializing destination git-repo6-hg repository | |
|
322 | scanning source... | |
|
323 | sorting... | |
|
324 | converting... | |
|
325 | 0 addsubmodule | |
|
326 | updating bookmarks | |
|
327 | $ hg -R git-repo6-hg log -v | |
|
328 | changeset: 0:* (glob) | |
|
329 | bookmark: master | |
|
330 | tag: tip | |
|
331 | user: nottest <test@example.org> | |
|
332 | date: Mon Jan 01 00:00:23 2007 +0000 | |
|
333 | files: .hgsub .hgsubstate | |
|
334 | description: | |
|
335 | addsubmodule | |
|
336 | ||
|
337 | committer: test <test@example.org> | |
|
338 | ||
|
339 | ||
|
340 | ||
|
341 | $ cd git-repo6-hg | |
|
342 | $ hg up >/dev/null 2>/dev/null | |
|
343 | $ cat .hgsubstate | |
|
344 | * git-repo5 (glob) | |
|
345 | $ cd git-repo5 | |
|
346 | $ cat foo | |
|
347 | sub |
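
For reference, the two state files the converted-submodule test checks use a simple line-oriented format: .hgsub maps a working-directory path to a subrepository source (with a [git] prefix for git subrepos), and .hgsubstate pins each path to a commit id. A sketch with illustrative values (the actual source URL and hash depend on the conversion):

    $ cat .hgsub
    git-repo5 = [git]../git-repo5
    $ cat .hgsubstate
    da39a3ee5e6b4b0d3255bfef95601890afd80709 git-repo5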
|