--- a/filelog.py
+++ b/filelog.py
@@ -1,108 +1,108 @@
 # filelog.py - file history class for mercurial
 #
 # Copyright 2005 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms
 # of the GNU General Public License, incorporated herein by reference.

 import os
 from revlog import *
 from demandload import *
 demandload(globals(), "bdiff")

 class filelog(revlog):
-    def __init__(self, opener, path, defversion=
+    def __init__(self, opener, path, defversion=REVLOG_DEFAULT_VERSION):
         revlog.__init__(self, opener,
                         os.path.join("data", self.encodedir(path + ".i")),
                         os.path.join("data", self.encodedir(path + ".d")),
                         defversion)

     # This avoids a collision between a file named foo and a dir named
     # foo.i or foo.d
     def encodedir(self, path):
         return (path
                 .replace(".hg/", ".hg.hg/")
                 .replace(".i/", ".i.hg/")
                 .replace(".d/", ".d.hg/"))

     def decodedir(self, path):
         return (path
                 .replace(".d.hg/", ".d/")
                 .replace(".i.hg/", ".i/")
                 .replace(".hg.hg/", ".hg/"))

     def read(self, node):
         t = self.revision(node)
         if not t.startswith('\1\n'):
             return t
         s = t.find('\1\n', 2)
         return t[s+2:]

     def readmeta(self, node):
         t = self.revision(node)
         if not t.startswith('\1\n'):
             return {}
         s = t.find('\1\n', 2)
         mt = t[2:s]
         m = {}
         for l in mt.splitlines():
             k, v = l.split(": ", 1)
             m[k] = v
         return m

     def add(self, text, meta, transaction, link, p1=None, p2=None):
         if meta or text.startswith('\1\n'):
             mt = ""
             if meta:
                 mt = [ "%s: %s\n" % (k, v) for k,v in meta.items() ]
             text = "\1\n%s\1\n%s" % ("".join(mt), text)
         return self.addrevision(text, transaction, link, p1, p2)

     def renamed(self, node):
         if self.parents(node)[0] != nullid:
             return False
         m = self.readmeta(node)
         if m and m.has_key("copy"):
             return (m["copy"], bin(m["copyrev"]))
         return False

     def annotate(self, node):

         def decorate(text, rev):
             return ([rev] * len(text.splitlines()), text)

         def pair(parent, child):
             for a1, a2, b1, b2 in bdiff.blocks(parent[1], child[1]):
                 child[0][b1:b2] = parent[0][a1:a2]
             return child

         # find all ancestors
         needed = {node:1}
         visit = [node]
         while visit:
             n = visit.pop(0)
             for p in self.parents(n):
                 if p not in needed:
                     needed[p] = 1
                     visit.append(p)
                 else:
                     # count how many times we'll use this
                     needed[p] += 1

         # sort by revision which is a topological order
         visit = [ (self.rev(n), n) for n in needed.keys() ]
         visit.sort()
         hist = {}

         for r,n in visit:
             curr = decorate(self.read(n), self.linkrev(n))
             for p in self.parents(n):
                 if p != nullid:
                     curr = pair(hist[p], curr)
                     # trim the history of unneeded revs
                     needed[p] -= 1
                     if not needed[p]:
                         del hist[p]
             hist[n] = curr

         return zip(hist[n][0], hist[n][1].splitlines(1))
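
A note on the metadata framing that read, readmeta, and add implement above:
a revision that carries metadata (currently the copy/copyrev rename info used
by renamed) is stored as "\1\n<key: value lines>\1\n<file text>", and a file
whose own text begins with "\1\n" gets an empty frame so the marker can never
be misread as content. A minimal standalone sketch of just that framing;
packmeta and unpackmeta are illustrative names, not part of this changeset:

    def packmeta(meta, text):
        # prepend "\1\nkey: value\n...\1\n" when there is metadata, or when
        # the text itself starts with the marker and needs escaping
        if meta or text.startswith('\1\n'):
            lines = ["%s: %s\n" % (k, v) for k, v in sorted(meta.items())]
            text = "\1\n%s\1\n%s" % ("".join(lines), text)
        return text

    def unpackmeta(text):
        # inverse of packmeta: return (metadata dict, payload text)
        if not text.startswith('\1\n'):
            return {}, text
        end = text.find('\1\n', 2)
        meta = dict(l.split(": ", 1) for l in text[2:end].splitlines())
        return meta, text[end + 2:]

    # round trip: a rename records where the file content came from
    packed = packmeta({"copy": "a.txt", "copyrev": "0" * 40}, "file body\n")
    assert unpackmeta(packed) == ({"copy": "a.txt", "copyrev": "0" * 40},
                                  "file body\n")
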
--- a/localrepo.py
+++ b/localrepo.py
@@ -1,51 +1,55 @@
 # localrepo.py - read/write repository class for mercurial
 #
 # Copyright 2005 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms
 # of the GNU General Public License, incorporated herein by reference.

 import os, util
 import filelog, manifest, changelog, dirstate, repo
 from node import *
 from i18n import gettext as _
 from demandload import *
 demandload(globals(), "appendfile changegroup")
 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
 demandload(globals(), "revlog traceback")

 class localrepository(object):
     def __del__(self):
         self.transhandle = None
     def __init__(self, parentui, path=None, create=0):
         if not path:
             p = os.getcwd()
             while not os.path.isdir(os.path.join(p, ".hg")):
                 oldp = p
                 p = os.path.dirname(p)
                 if p == oldp:
                     raise repo.RepoError(_("no repo found"))
             path = p
         self.path = os.path.join(path, ".hg")

         if not create and not os.path.isdir(self.path):
             raise repo.RepoError(_("repository %s not found") % path)

         self.root = os.path.abspath(path)
         self.origroot = path
         self.ui = ui.ui(parentui=parentui)
         self.opener = util.opener(self.path)
         self.wopener = util.opener(self.root)

         try:
             self.ui.readconfig(self.join("hgrc"), self.root)
         except IOError:
             pass

         v = self.ui.revlogopts
-        self.revlogversion = int(v.get('format', revlog.REVLOG
+        self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
         self.revlogv1 = self.revlogversion != revlog.REVLOGV0
+        fl = v.get('flags', None)
         flags = 0
-        for x in v.get('flags', "").split():
-            f
+        if fl != None:
+            for x in fl.split():
+                flags |= revlog.flagstr(x)
+        elif self.revlogv1:
+            flags = revlog.REVLOG_DEFAULT_FLAGS

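
The behavioral point of the hunk above: previously a missing "flags" entry in
the [revlog] config section always meant flags = 0; now an unset entry falls
back to revlog.REVLOG_DEFAULT_FLAGS whenever the chosen format is not
REVLOGV0, while an explicit entry (even an empty one) is still honored
verbatim. A small self-contained sketch of that decision; the constant values
and the flagstr body here are stand-ins for illustration, not quotes from
revlog.py:

    REVLOGV0 = 0
    REVLOG_DEFAULT_FORMAT = 1       # stand-in for the revlogng format number
    REVLOG_DEFAULT_FLAGS = 1 << 16  # stand-in for the default flag bits

    def flagstr(flag):
        # assumed shape of revlog.flagstr: map a config word to a flag bit
        if flag == "inline":
            return REVLOG_DEFAULT_FLAGS
        raise ValueError("unknown revlog flag %s" % flag)

    def pickversion(revlogopts):
        # mirrors the new logic in localrepository.__init__
        version = int(revlogopts.get('format', REVLOG_DEFAULT_FORMAT))
        fl = revlogopts.get('flags', None)
        flags = 0
        if fl != None:
            for x in fl.split():       # explicit setting wins, even if empty
                flags |= flagstr(x)
        elif version != REVLOGV0:
            flags = REVLOG_DEFAULT_FLAGS   # new: defaults for revlogng repos
        return version | flags

    print pickversion({})                # ng format, default flags turned on
    print pickversion({'flags': ''})     # explicitly no flags: stays 0
    print pickversion({'format': '0'})   # v0 repos never get default flags
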
@@ -52,104 +56,104 @@
         v = self.revlogversion | flags
         self.manifest = manifest.manifest(self.opener, v)
         self.changelog = changelog.changelog(self.opener, v)

         # the changelog might not have the inline index flag
         # on.  If the format of the changelog is the same as found in
         # .hgrc, apply any flags found in the .hgrc as well.
         # Otherwise, just version from the changelog
         v = self.changelog.version
         if v == self.revlogversion:
             v |= flags
         self.revlogversion = v

         self.tagscache = None
         self.nodetagscache = None
         self.encodepats = None
         self.decodepats = None
         self.transhandle = None

         if create:
             os.mkdir(self.path)
             os.mkdir(self.join("data"))

         self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)

     def hook(self, name, throw=False, **args):
         def callhook(hname, funcname):
             '''call python hook. hook is callable object, looked up as
             name in python module. if callable returns "true", hook
             fails, else passes. if hook raises exception, treated as
             hook failure. exception propagates if throw is "true".

             reason for "true" meaning "hook failed" is so that
             unmodified commands (e.g. mercurial.commands.update) can
             be run as hooks without wrappers to convert return values.'''

             self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
             d = funcname.rfind('.')
             if d == -1:
                 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
                                  % (hname, funcname))
             modname = funcname[:d]
             try:
                 obj = __import__(modname)
             except ImportError:
                 raise util.Abort(_('%s hook is invalid '
                                    '(import of "%s" failed)') %
                                  (hname, modname))
             try:
                 for p in funcname.split('.')[1:]:
                     obj = getattr(obj, p)
             except AttributeError, err:
                 raise util.Abort(_('%s hook is invalid '
                                    '("%s" is not defined)') %
                                  (hname, funcname))
             if not callable(obj):
                 raise util.Abort(_('%s hook is invalid '
                                    '("%s" is not callable)') %
                                  (hname, funcname))
             try:
                 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
             except (KeyboardInterrupt, util.SignalInterrupt):
                 raise
             except Exception, exc:
                 if isinstance(exc, util.Abort):
                     self.ui.warn(_('error: %s hook failed: %s\n') %
                                  (hname, exc.args[0] % exc.args[1:]))
                 else:
                     self.ui.warn(_('error: %s hook raised an exception: '
                                    '%s\n') % (hname, exc))
                 if throw:
                     raise
                 if self.ui.traceback:
                     traceback.print_exc()
                 return True
             if r:
                 if throw:
                     raise util.Abort(_('%s hook failed') % hname)
                 self.ui.warn(_('warning: %s hook failed\n') % hname)
             return r

         def runhook(name, cmd):
             self.ui.note(_("running hook %s: %s\n") % (name, cmd))
             env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()] +
                        [(k.upper(), v) for k, v in args.iteritems()])
             r = util.system(cmd, environ=env, cwd=self.root)
             if r:
                 desc, r = util.explain_exit(r)
                 if throw:
                     raise util.Abort(_('%s hook %s') % (name, desc))
                 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
             return r

         r = False
         hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
                  if hname.split(".", 1)[0] == name and cmd]
         hooks.sort()
         for hname, cmd in hooks:
             if cmd.startswith('python:'):
                 r = callhook(hname, cmd[7:].strip()) or r
             else:
                 r = runhook(hname, cmd) or r
         return r

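
For context on the hook plumbing above: entries in the [hooks] section whose
name (up to an optional ".suffix") matches the event are run in sorted order.
A value starting with "python:" names a dotted callable, which is imported
and called with ui, repo, hooktype, and the event's keyword arguments, and
signals failure by returning a true value; any other value runs as a shell
command with those arguments exported both as HG_* and plain uppercase
environment variables. A hypothetical example of both kinds (hgchecks is an
illustrative module name, not part of Mercurial):

    # hgrc
    [hooks]
    commit = echo "committed $HG_NODE"
    pretxncommit.lint = python:hgchecks.checkmessage

    # hgchecks.py, importable from sys.path
    def checkmessage(ui, repo, hooktype, node=None, **kwargs):
        # pretxncommit passes node=hex(n); returning True aborts the commit.
        # In this era changelog.read returns (manifest, user, date, files,
        # description), so the message is the last field.
        desc = repo.changelog.read(repo.changelog.lookup(node))[4]
        if not desc.strip():
            ui.warn("empty commit message\n")
            return True
        return False
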
156 | def tags(self): |
|
160 | def tags(self): | |
157 | '''return a mapping of tag to node''' |
|
161 | '''return a mapping of tag to node''' | |
158 | if not self.tagscache: |
|
162 | if not self.tagscache: | |
159 | self.tagscache = {} |
|
163 | self.tagscache = {} | |
160 |
|
164 | |||
161 | def parsetag(line, context): |
|
165 | def parsetag(line, context): | |
162 | if not line: |
|
166 | if not line: | |
163 | return |
|
167 | return | |
164 | s = l.split(" ", 1) |
|
168 | s = l.split(" ", 1) | |
165 | if len(s) != 2: |
|
169 | if len(s) != 2: | |
166 | self.ui.warn(_("%s: ignoring invalid tag\n") % context) |
|
170 | self.ui.warn(_("%s: ignoring invalid tag\n") % context) | |
167 | return |
|
171 | return | |
168 | node, key = s |
|
172 | node, key = s | |
169 | try: |
|
173 | try: | |
170 | bin_n = bin(node) |
|
174 | bin_n = bin(node) | |
171 | except TypeError: |
|
175 | except TypeError: | |
172 | self.ui.warn(_("%s: ignoring invalid tag\n") % context) |
|
176 | self.ui.warn(_("%s: ignoring invalid tag\n") % context) | |
173 | return |
|
177 | return | |
174 | if bin_n not in self.changelog.nodemap: |
|
178 | if bin_n not in self.changelog.nodemap: | |
175 | self.ui.warn(_("%s: ignoring invalid tag\n") % context) |
|
179 | self.ui.warn(_("%s: ignoring invalid tag\n") % context) | |
176 | return |
|
180 | return | |
177 | self.tagscache[key.strip()] = bin_n |
|
181 | self.tagscache[key.strip()] = bin_n | |
178 |
|
182 | |||
179 | # read each head of the tags file, ending with the tip |
|
183 | # read each head of the tags file, ending with the tip | |
180 | # and add each tag found to the map, with "newer" ones |
|
184 | # and add each tag found to the map, with "newer" ones | |
181 | # taking precedence |
|
185 | # taking precedence | |
182 | fl = self.file(".hgtags") |
|
186 | fl = self.file(".hgtags") | |
183 | h = fl.heads() |
|
187 | h = fl.heads() | |
184 | h.reverse() |
|
188 | h.reverse() | |
185 | for r in h: |
|
189 | for r in h: | |
186 | count = 0 |
|
190 | count = 0 | |
187 | for l in fl.read(r).splitlines(): |
|
191 | for l in fl.read(r).splitlines(): | |
188 | count += 1 |
|
192 | count += 1 | |
189 | parsetag(l, ".hgtags:%d" % count) |
|
193 | parsetag(l, ".hgtags:%d" % count) | |
190 |
|
194 | |||
191 | try: |
|
195 | try: | |
192 | f = self.opener("localtags") |
|
196 | f = self.opener("localtags") | |
193 | count = 0 |
|
197 | count = 0 | |
194 | for l in f: |
|
198 | for l in f: | |
195 | count += 1 |
|
199 | count += 1 | |
196 | parsetag(l, "localtags:%d" % count) |
|
200 | parsetag(l, "localtags:%d" % count) | |
197 | except IOError: |
|
201 | except IOError: | |
198 | pass |
|
202 | pass | |
199 |
|
203 | |||
200 | self.tagscache['tip'] = self.changelog.tip() |
|
204 | self.tagscache['tip'] = self.changelog.tip() | |
201 |
|
205 | |||
202 | return self.tagscache |
|
206 | return self.tagscache | |
203 |
|
207 | |||
204 | def tagslist(self): |
|
208 | def tagslist(self): | |
205 | '''return a list of tags ordered by revision''' |
|
209 | '''return a list of tags ordered by revision''' | |
206 | l = [] |
|
210 | l = [] | |
207 | for t, n in self.tags().items(): |
|
211 | for t, n in self.tags().items(): | |
208 | try: |
|
212 | try: | |
209 | r = self.changelog.rev(n) |
|
213 | r = self.changelog.rev(n) | |
210 | except: |
|
214 | except: | |
211 | r = -2 # sort to the beginning of the list if unknown |
|
215 | r = -2 # sort to the beginning of the list if unknown | |
212 | l.append((r, t, n)) |
|
216 | l.append((r, t, n)) | |
213 | l.sort() |
|
217 | l.sort() | |
214 | return [(t, n) for r, t, n in l] |
|
218 | return [(t, n) for r, t, n in l] | |
215 |
|
219 | |||
216 | def nodetags(self, node): |
|
220 | def nodetags(self, node): | |
217 | '''return the tags associated with a node''' |
|
221 | '''return the tags associated with a node''' | |
218 | if not self.nodetagscache: |
|
222 | if not self.nodetagscache: | |
219 | self.nodetagscache = {} |
|
223 | self.nodetagscache = {} | |
220 | for t, n in self.tags().items(): |
|
224 | for t, n in self.tags().items(): | |
221 | self.nodetagscache.setdefault(n, []).append(t) |
|
225 | self.nodetagscache.setdefault(n, []).append(t) | |
222 | return self.nodetagscache.get(node, []) |
|
226 | return self.nodetagscache.get(node, []) | |
223 |
|
227 | |||
224 | def lookup(self, key): |
|
228 | def lookup(self, key): | |
225 | try: |
|
229 | try: | |
226 | return self.tags()[key] |
|
230 | return self.tags()[key] | |
227 | except KeyError: |
|
231 | except KeyError: | |
228 | try: |
|
232 | try: | |
229 | return self.changelog.lookup(key) |
|
233 | return self.changelog.lookup(key) | |
230 | except: |
|
234 | except: | |
231 | raise repo.RepoError(_("unknown revision '%s'") % key) |
|
235 | raise repo.RepoError(_("unknown revision '%s'") % key) | |
232 |
|
236 | |||
233 | def dev(self): |
|
237 | def dev(self): | |
234 | return os.stat(self.path).st_dev |
|
238 | return os.stat(self.path).st_dev | |
235 |
|
239 | |||
236 | def local(self): |
|
240 | def local(self): | |
237 | return True |
|
241 | return True | |
238 |
|
242 | |||
239 | def join(self, f): |
|
243 | def join(self, f): | |
240 | return os.path.join(self.path, f) |
|
244 | return os.path.join(self.path, f) | |
241 |
|
245 | |||
242 | def wjoin(self, f): |
|
246 | def wjoin(self, f): | |
243 | return os.path.join(self.root, f) |
|
247 | return os.path.join(self.root, f) | |
244 |
|
248 | |||
245 | def file(self, f): |
|
249 | def file(self, f): | |
246 | if f[0] == '/': |
|
250 | if f[0] == '/': | |
247 | f = f[1:] |
|
251 | f = f[1:] | |
248 | return filelog.filelog(self.opener, f, self.revlogversion) |
|
252 | return filelog.filelog(self.opener, f, self.revlogversion) | |
249 |
|
253 | |||
250 | def getcwd(self): |
|
254 | def getcwd(self): | |
251 | return self.dirstate.getcwd() |
|
255 | return self.dirstate.getcwd() | |
252 |
|
256 | |||
253 | def wfile(self, f, mode='r'): |
|
257 | def wfile(self, f, mode='r'): | |
254 | return self.wopener(f, mode) |
|
258 | return self.wopener(f, mode) | |
255 |
|
259 | |||
256 | def wread(self, filename): |
|
260 | def wread(self, filename): | |
257 | if self.encodepats == None: |
|
261 | if self.encodepats == None: | |
258 | l = [] |
|
262 | l = [] | |
259 | for pat, cmd in self.ui.configitems("encode"): |
|
263 | for pat, cmd in self.ui.configitems("encode"): | |
260 | mf = util.matcher(self.root, "", [pat], [], [])[1] |
|
264 | mf = util.matcher(self.root, "", [pat], [], [])[1] | |
261 | l.append((mf, cmd)) |
|
265 | l.append((mf, cmd)) | |
262 | self.encodepats = l |
|
266 | self.encodepats = l | |
263 |
|
267 | |||
264 | data = self.wopener(filename, 'r').read() |
|
268 | data = self.wopener(filename, 'r').read() | |
265 |
|
269 | |||
266 | for mf, cmd in self.encodepats: |
|
270 | for mf, cmd in self.encodepats: | |
267 | if mf(filename): |
|
271 | if mf(filename): | |
268 | self.ui.debug(_("filtering %s through %s\n") % (filename, cmd)) |
|
272 | self.ui.debug(_("filtering %s through %s\n") % (filename, cmd)) | |
269 | data = util.filter(data, cmd) |
|
273 | data = util.filter(data, cmd) | |
270 | break |
|
274 | break | |
271 |
|
275 | |||
272 | return data |
|
276 | return data | |
273 |
|
277 | |||
274 | def wwrite(self, filename, data, fd=None): |
|
278 | def wwrite(self, filename, data, fd=None): | |
275 | if self.decodepats == None: |
|
279 | if self.decodepats == None: | |
276 | l = [] |
|
280 | l = [] | |
277 | for pat, cmd in self.ui.configitems("decode"): |
|
281 | for pat, cmd in self.ui.configitems("decode"): | |
278 | mf = util.matcher(self.root, "", [pat], [], [])[1] |
|
282 | mf = util.matcher(self.root, "", [pat], [], [])[1] | |
279 | l.append((mf, cmd)) |
|
283 | l.append((mf, cmd)) | |
280 | self.decodepats = l |
|
284 | self.decodepats = l | |
281 |
|
285 | |||
282 | for mf, cmd in self.decodepats: |
|
286 | for mf, cmd in self.decodepats: | |
283 | if mf(filename): |
|
287 | if mf(filename): | |
284 | self.ui.debug(_("filtering %s through %s\n") % (filename, cmd)) |
|
288 | self.ui.debug(_("filtering %s through %s\n") % (filename, cmd)) | |
285 | data = util.filter(data, cmd) |
|
289 | data = util.filter(data, cmd) | |
286 | break |
|
290 | break | |
287 |
|
291 | |||
288 | if fd: |
|
292 | if fd: | |
289 | return fd.write(data) |
|
293 | return fd.write(data) | |
290 | return self.wopener(filename, 'w').write(data) |
|
294 | return self.wopener(filename, 'w').write(data) | |
291 |
|
295 | |||
292 | def transaction(self): |
|
296 | def transaction(self): | |
293 | tr = self.transhandle |
|
297 | tr = self.transhandle | |
294 | if tr != None and tr.running(): |
|
298 | if tr != None and tr.running(): | |
295 | return tr.nest() |
|
299 | return tr.nest() | |
296 |
|
300 | |||
297 | # save dirstate for undo |
|
301 | # save dirstate for undo | |
298 | try: |
|
302 | try: | |
299 | ds = self.opener("dirstate").read() |
|
303 | ds = self.opener("dirstate").read() | |
300 | except IOError: |
|
304 | except IOError: | |
301 | ds = "" |
|
305 | ds = "" | |
302 | self.opener("journal.dirstate", "w").write(ds) |
|
306 | self.opener("journal.dirstate", "w").write(ds) | |
303 |
|
307 | |||
304 | tr = transaction.transaction(self.ui.warn, self.opener, |
|
308 | tr = transaction.transaction(self.ui.warn, self.opener, | |
305 | self.join("journal"), |
|
309 | self.join("journal"), | |
306 | aftertrans(self.path)) |
|
310 | aftertrans(self.path)) | |
307 | self.transhandle = tr |
|
311 | self.transhandle = tr | |
308 | return tr |
|
312 | return tr | |
309 |
|
313 | |||
310 | def recover(self): |
|
314 | def recover(self): | |
311 | l = self.lock() |
|
315 | l = self.lock() | |
312 | if os.path.exists(self.join("journal")): |
|
316 | if os.path.exists(self.join("journal")): | |
313 | self.ui.status(_("rolling back interrupted transaction\n")) |
|
317 | self.ui.status(_("rolling back interrupted transaction\n")) | |
314 | transaction.rollback(self.opener, self.join("journal")) |
|
318 | transaction.rollback(self.opener, self.join("journal")) | |
315 | self.reload() |
|
319 | self.reload() | |
316 | return True |
|
320 | return True | |
317 | else: |
|
321 | else: | |
318 | self.ui.warn(_("no interrupted transaction available\n")) |
|
322 | self.ui.warn(_("no interrupted transaction available\n")) | |
319 | return False |
|
323 | return False | |
320 |
|
324 | |||
321 | def undo(self, wlock=None): |
|
325 | def undo(self, wlock=None): | |
322 | if not wlock: |
|
326 | if not wlock: | |
323 | wlock = self.wlock() |
|
327 | wlock = self.wlock() | |
324 | l = self.lock() |
|
328 | l = self.lock() | |
325 | if os.path.exists(self.join("undo")): |
|
329 | if os.path.exists(self.join("undo")): | |
326 | self.ui.status(_("rolling back last transaction\n")) |
|
330 | self.ui.status(_("rolling back last transaction\n")) | |
327 | transaction.rollback(self.opener, self.join("undo")) |
|
331 | transaction.rollback(self.opener, self.join("undo")) | |
328 | util.rename(self.join("undo.dirstate"), self.join("dirstate")) |
|
332 | util.rename(self.join("undo.dirstate"), self.join("dirstate")) | |
329 | self.reload() |
|
333 | self.reload() | |
330 | self.wreload() |
|
334 | self.wreload() | |
331 | else: |
|
335 | else: | |
332 | self.ui.warn(_("no undo information available\n")) |
|
336 | self.ui.warn(_("no undo information available\n")) | |
333 |
|
337 | |||
334 | def wreload(self): |
|
338 | def wreload(self): | |
335 | self.dirstate.read() |
|
339 | self.dirstate.read() | |
336 |
|
340 | |||
337 | def reload(self): |
|
341 | def reload(self): | |
338 | self.changelog.load() |
|
342 | self.changelog.load() | |
339 | self.manifest.load() |
|
343 | self.manifest.load() | |
340 | self.tagscache = None |
|
344 | self.tagscache = None | |
341 | self.nodetagscache = None |
|
345 | self.nodetagscache = None | |
342 |
|
346 | |||
343 | def do_lock(self, lockname, wait, releasefn=None, acquirefn=None, |
|
347 | def do_lock(self, lockname, wait, releasefn=None, acquirefn=None, | |
344 | desc=None): |
|
348 | desc=None): | |
345 | try: |
|
349 | try: | |
346 | l = lock.lock(self.join(lockname), 0, releasefn, desc=desc) |
|
350 | l = lock.lock(self.join(lockname), 0, releasefn, desc=desc) | |
347 | except lock.LockHeld, inst: |
|
351 | except lock.LockHeld, inst: | |
348 | if not wait: |
|
352 | if not wait: | |
349 | raise |
|
353 | raise | |
350 | self.ui.warn(_("waiting for lock on %s held by %s\n") % |
|
354 | self.ui.warn(_("waiting for lock on %s held by %s\n") % | |
351 | (desc, inst.args[0])) |
|
355 | (desc, inst.args[0])) | |
352 | # default to 600 seconds timeout |
|
356 | # default to 600 seconds timeout | |
353 | l = lock.lock(self.join(lockname), |
|
357 | l = lock.lock(self.join(lockname), | |
354 | int(self.ui.config("ui", "timeout") or 600), |
|
358 | int(self.ui.config("ui", "timeout") or 600), | |
355 | releasefn, desc=desc) |
|
359 | releasefn, desc=desc) | |
356 | if acquirefn: |
|
360 | if acquirefn: | |
357 | acquirefn() |
|
361 | acquirefn() | |
358 | return l |
|
362 | return l | |
359 |
|
363 | |||
360 | def lock(self, wait=1): |
|
364 | def lock(self, wait=1): | |
361 | return self.do_lock("lock", wait, acquirefn=self.reload, |
|
365 | return self.do_lock("lock", wait, acquirefn=self.reload, | |
362 | desc=_('repository %s') % self.origroot) |
|
366 | desc=_('repository %s') % self.origroot) | |
363 |
|
367 | |||
364 | def wlock(self, wait=1): |
|
368 | def wlock(self, wait=1): | |
365 | return self.do_lock("wlock", wait, self.dirstate.write, |
|
369 | return self.do_lock("wlock", wait, self.dirstate.write, | |
366 | self.wreload, |
|
370 | self.wreload, | |
367 | desc=_('working directory of %s') % self.origroot) |
|
371 | desc=_('working directory of %s') % self.origroot) | |
368 |
|
372 | |||
369 | def checkfilemerge(self, filename, text, filelog, manifest1, manifest2): |
|
373 | def checkfilemerge(self, filename, text, filelog, manifest1, manifest2): | |
370 | "determine whether a new filenode is needed" |
|
374 | "determine whether a new filenode is needed" | |
371 | fp1 = manifest1.get(filename, nullid) |
|
375 | fp1 = manifest1.get(filename, nullid) | |
372 | fp2 = manifest2.get(filename, nullid) |
|
376 | fp2 = manifest2.get(filename, nullid) | |
373 |
|
377 | |||
374 | if fp2 != nullid: |
|
378 | if fp2 != nullid: | |
375 | # is one parent an ancestor of the other? |
|
379 | # is one parent an ancestor of the other? | |
376 | fpa = filelog.ancestor(fp1, fp2) |
|
380 | fpa = filelog.ancestor(fp1, fp2) | |
377 | if fpa == fp1: |
|
381 | if fpa == fp1: | |
378 | fp1, fp2 = fp2, nullid |
|
382 | fp1, fp2 = fp2, nullid | |
379 | elif fpa == fp2: |
|
383 | elif fpa == fp2: | |
380 | fp2 = nullid |
|
384 | fp2 = nullid | |
381 |
|
385 | |||
382 | # is the file unmodified from the parent? report existing entry |
|
386 | # is the file unmodified from the parent? report existing entry | |
383 | if fp2 == nullid and text == filelog.read(fp1): |
|
387 | if fp2 == nullid and text == filelog.read(fp1): | |
384 | return (fp1, None, None) |
|
388 | return (fp1, None, None) | |
385 |
|
389 | |||
386 | return (None, fp1, fp2) |
|
390 | return (None, fp1, fp2) | |
387 |
|
391 | |||
388 | def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None): |
|
392 | def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None): | |
389 | orig_parent = self.dirstate.parents()[0] or nullid |
|
393 | orig_parent = self.dirstate.parents()[0] or nullid | |
390 | p1 = p1 or self.dirstate.parents()[0] or nullid |
|
394 | p1 = p1 or self.dirstate.parents()[0] or nullid | |
391 | p2 = p2 or self.dirstate.parents()[1] or nullid |
|
395 | p2 = p2 or self.dirstate.parents()[1] or nullid | |
392 | c1 = self.changelog.read(p1) |
|
396 | c1 = self.changelog.read(p1) | |
393 | c2 = self.changelog.read(p2) |
|
397 | c2 = self.changelog.read(p2) | |
394 | m1 = self.manifest.read(c1[0]) |
|
398 | m1 = self.manifest.read(c1[0]) | |
395 | mf1 = self.manifest.readflags(c1[0]) |
|
399 | mf1 = self.manifest.readflags(c1[0]) | |
396 | m2 = self.manifest.read(c2[0]) |
|
400 | m2 = self.manifest.read(c2[0]) | |
397 | changed = [] |
|
401 | changed = [] | |
398 |
|
402 | |||
399 | if orig_parent == p1: |
|
403 | if orig_parent == p1: | |
400 | update_dirstate = 1 |
|
404 | update_dirstate = 1 | |
401 | else: |
|
405 | else: | |
402 | update_dirstate = 0 |
|
406 | update_dirstate = 0 | |
403 |
|
407 | |||
404 | if not wlock: |
|
408 | if not wlock: | |
405 | wlock = self.wlock() |
|
409 | wlock = self.wlock() | |
406 | l = self.lock() |
|
410 | l = self.lock() | |
407 | tr = self.transaction() |
|
411 | tr = self.transaction() | |
408 | mm = m1.copy() |
|
412 | mm = m1.copy() | |
409 | mfm = mf1.copy() |
|
413 | mfm = mf1.copy() | |
410 | linkrev = self.changelog.count() |
|
414 | linkrev = self.changelog.count() | |
411 | for f in files: |
|
415 | for f in files: | |
412 | try: |
|
416 | try: | |
413 | t = self.wread(f) |
|
417 | t = self.wread(f) | |
414 | tm = util.is_exec(self.wjoin(f), mfm.get(f, False)) |
|
418 | tm = util.is_exec(self.wjoin(f), mfm.get(f, False)) | |
415 | r = self.file(f) |
|
419 | r = self.file(f) | |
416 | mfm[f] = tm |
|
420 | mfm[f] = tm | |
417 |
|
421 | |||
418 | (entry, fp1, fp2) = self.checkfilemerge(f, t, r, m1, m2) |
|
422 | (entry, fp1, fp2) = self.checkfilemerge(f, t, r, m1, m2) | |
419 | if entry: |
|
423 | if entry: | |
420 | mm[f] = entry |
|
424 | mm[f] = entry | |
421 | continue |
|
425 | continue | |
422 |
|
426 | |||
423 | mm[f] = r.add(t, {}, tr, linkrev, fp1, fp2) |
|
427 | mm[f] = r.add(t, {}, tr, linkrev, fp1, fp2) | |
424 | changed.append(f) |
|
428 | changed.append(f) | |
425 | if update_dirstate: |
|
429 | if update_dirstate: | |
426 | self.dirstate.update([f], "n") |
|
430 | self.dirstate.update([f], "n") | |
427 | except IOError: |
|
431 | except IOError: | |
428 | try: |
|
432 | try: | |
429 | del mm[f] |
|
433 | del mm[f] | |
430 | del mfm[f] |
|
434 | del mfm[f] | |
431 | if update_dirstate: |
|
435 | if update_dirstate: | |
432 | self.dirstate.forget([f]) |
|
436 | self.dirstate.forget([f]) | |
433 | except: |
|
437 | except: | |
434 | # deleted from p2? |
|
438 | # deleted from p2? | |
435 | pass |
|
439 | pass | |
436 |
|
440 | |||
437 | mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0]) |
|
441 | mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0]) | |
438 | user = user or self.ui.username() |
|
442 | user = user or self.ui.username() | |
439 | n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date) |
|
443 | n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date) | |
440 | tr.close() |
|
444 | tr.close() | |
441 | if update_dirstate: |
|
445 | if update_dirstate: | |
442 | self.dirstate.setparents(n, nullid) |
|
446 | self.dirstate.setparents(n, nullid) | |
443 |
|
447 | |||
444 | def commit(self, files=None, text="", user=None, date=None, |
|
448 | def commit(self, files=None, text="", user=None, date=None, | |
445 | match=util.always, force=False, lock=None, wlock=None): |
|
449 | match=util.always, force=False, lock=None, wlock=None): | |
446 | commit = [] |
|
450 | commit = [] | |
447 | remove = [] |
|
451 | remove = [] | |
448 | changed = [] |
|
452 | changed = [] | |
449 |
|
453 | |||
450 | if files: |
|
454 | if files: | |
451 | for f in files: |
|
455 | for f in files: | |
452 | s = self.dirstate.state(f) |
|
456 | s = self.dirstate.state(f) | |
453 | if s in 'nmai': |
|
457 | if s in 'nmai': | |
454 | commit.append(f) |
|
458 | commit.append(f) | |
455 | elif s == 'r': |
|
459 | elif s == 'r': | |
456 | remove.append(f) |
|
460 | remove.append(f) | |
457 | else: |
|
461 | else: | |
458 | self.ui.warn(_("%s not tracked!\n") % f) |
|
462 | self.ui.warn(_("%s not tracked!\n") % f) | |
459 | else: |
|
463 | else: | |
460 | modified, added, removed, deleted, unknown = self.changes(match=match) |
|
464 | modified, added, removed, deleted, unknown = self.changes(match=match) | |
461 | commit = modified + added |
|
465 | commit = modified + added | |
462 | remove = removed |
|
466 | remove = removed | |
463 |
|
467 | |||
464 | p1, p2 = self.dirstate.parents() |
|
468 | p1, p2 = self.dirstate.parents() | |
465 | c1 = self.changelog.read(p1) |
|
469 | c1 = self.changelog.read(p1) | |
466 | c2 = self.changelog.read(p2) |
|
470 | c2 = self.changelog.read(p2) | |
467 | m1 = self.manifest.read(c1[0]) |
|
471 | m1 = self.manifest.read(c1[0]) | |
468 | mf1 = self.manifest.readflags(c1[0]) |
|
472 | mf1 = self.manifest.readflags(c1[0]) | |
469 | m2 = self.manifest.read(c2[0]) |
|
473 | m2 = self.manifest.read(c2[0]) | |
470 |
|
474 | |||
471 | if not commit and not remove and not force and p2 == nullid: |
|
475 | if not commit and not remove and not force and p2 == nullid: | |
472 | self.ui.status(_("nothing changed\n")) |
|
476 | self.ui.status(_("nothing changed\n")) | |
473 | return None |
|
477 | return None | |
474 |
|
478 | |||
475 | xp1 = hex(p1) |
|
479 | xp1 = hex(p1) | |
476 | if p2 == nullid: xp2 = '' |
|
480 | if p2 == nullid: xp2 = '' | |
477 | else: xp2 = hex(p2) |
|
481 | else: xp2 = hex(p2) | |
478 |
|
482 | |||
479 | self.hook("precommit", throw=True, parent1=xp1, parent2=xp2) |
|
483 | self.hook("precommit", throw=True, parent1=xp1, parent2=xp2) | |
480 |
|
484 | |||
481 | if not wlock: |
|
485 | if not wlock: | |
482 | wlock = self.wlock() |
|
486 | wlock = self.wlock() | |
483 | if not lock: |
|
487 | if not lock: | |
484 | lock = self.lock() |
|
488 | lock = self.lock() | |
485 | tr = self.transaction() |
|
489 | tr = self.transaction() | |
486 |
|
490 | |||
487 | # check in files |
|
491 | # check in files | |
488 | new = {} |
|
492 | new = {} | |
489 | linkrev = self.changelog.count() |
|
493 | linkrev = self.changelog.count() | |
490 | commit.sort() |
|
494 | commit.sort() | |
491 | for f in commit: |
|
495 | for f in commit: | |
492 | self.ui.note(f + "\n") |
|
496 | self.ui.note(f + "\n") | |
493 | try: |
|
497 | try: | |
494 | mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False)) |
|
498 | mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False)) | |
495 | t = self.wread(f) |
|
499 | t = self.wread(f) | |
496 | except IOError: |
|
500 | except IOError: | |
497 | self.ui.warn(_("trouble committing %s!\n") % f) |
|
501 | self.ui.warn(_("trouble committing %s!\n") % f) | |
498 | raise |
|
502 | raise | |
499 |
|
503 | |||
500 | r = self.file(f) |
|
504 | r = self.file(f) | |
501 |
|
505 | |||
502 | meta = {} |
|
506 | meta = {} | |
503 | cp = self.dirstate.copied(f) |
|
507 | cp = self.dirstate.copied(f) | |
504 | if cp: |
|
508 | if cp: | |
505 | meta["copy"] = cp |
|
509 | meta["copy"] = cp | |
506 | meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid))) |
|
510 | meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid))) | |
507 | self.ui.debug(_(" %s: copy %s:%s\n") % (f, cp, meta["copyrev"])) |
|
511 | self.ui.debug(_(" %s: copy %s:%s\n") % (f, cp, meta["copyrev"])) | |
508 | fp1, fp2 = nullid, nullid |
|
512 | fp1, fp2 = nullid, nullid | |
509 | else: |
|
513 | else: | |
510 | entry, fp1, fp2 = self.checkfilemerge(f, t, r, m1, m2) |
|
514 | entry, fp1, fp2 = self.checkfilemerge(f, t, r, m1, m2) | |
511 | if entry: |
|
515 | if entry: | |
512 | new[f] = entry |
|
516 | new[f] = entry | |
513 | continue |
|
517 | continue | |
514 |
|
518 | |||
515 | new[f] = r.add(t, meta, tr, linkrev, fp1, fp2) |
|
519 | new[f] = r.add(t, meta, tr, linkrev, fp1, fp2) | |
516 | # remember what we've added so that we can later calculate |
|
520 | # remember what we've added so that we can later calculate | |
517 | # the files to pull from a set of changesets |
|
521 | # the files to pull from a set of changesets | |
518 | changed.append(f) |
|
522 | changed.append(f) | |
519 |
|
523 | |||
520 | # update manifest |
|
524 | # update manifest | |
521 | m1 = m1.copy() |
|
525 | m1 = m1.copy() | |
522 | m1.update(new) |
|
526 | m1.update(new) | |
523 | for f in remove: |
|
527 | for f in remove: | |
524 | if f in m1: |
|
528 | if f in m1: | |
525 | del m1[f] |
|
529 | del m1[f] | |
526 | mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0], |
|
530 | mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0], | |
527 | (new, remove)) |
|
531 | (new, remove)) | |
528 |
|
532 | |||
529 | # add changeset |
|
533 | # add changeset | |
530 | new = new.keys() |
|
534 | new = new.keys() | |
531 | new.sort() |
|
535 | new.sort() | |
532 |
|
536 | |||
533 | user = user or self.ui.username() |
|
537 | user = user or self.ui.username() | |
534 | if not text: |
|
538 | if not text: | |
535 | edittext = [""] |
|
539 | edittext = [""] | |
536 | if p2 != nullid: |
|
540 | if p2 != nullid: | |
537 | edittext.append("HG: branch merge") |
|
541 | edittext.append("HG: branch merge") | |
538 | edittext.extend(["HG: changed %s" % f for f in changed]) |
|
542 | edittext.extend(["HG: changed %s" % f for f in changed]) | |
539 | edittext.extend(["HG: removed %s" % f for f in remove]) |
|
543 | edittext.extend(["HG: removed %s" % f for f in remove]) | |
540 | if not changed and not remove: |
|
544 | if not changed and not remove: | |
541 | edittext.append("HG: no files changed") |
|
545 | edittext.append("HG: no files changed") | |
542 | edittext.append("") |
|
546 | edittext.append("") | |
543 | # run editor in the repository root |
|
547 | # run editor in the repository root | |
544 | olddir = os.getcwd() |
|
548 | olddir = os.getcwd() | |
545 | os.chdir(self.root) |
|
549 | os.chdir(self.root) | |
546 | edittext = self.ui.edit("\n".join(edittext), user) |
|
550 | edittext = self.ui.edit("\n".join(edittext), user) | |
547 | os.chdir(olddir) |
|
551 | os.chdir(olddir) | |
548 | if not edittext.rstrip(): |
|
552 | if not edittext.rstrip(): | |
549 | return None |
|
553 | return None | |
550 | text = edittext |
|
554 | text = edittext | |
551 |
|
555 | |||
552 | n = self.changelog.add(mn, changed + remove, text, tr, p1, p2, user, date) |
|
556 | n = self.changelog.add(mn, changed + remove, text, tr, p1, p2, user, date) | |
553 | self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1, |
|
557 | self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1, | |
554 | parent2=xp2) |
|
558 | parent2=xp2) | |
555 | tr.close() |
|
559 | tr.close() | |
556 |
|
560 | |||
557 | self.dirstate.setparents(n) |
|
561 | self.dirstate.setparents(n) | |
558 | self.dirstate.update(new, "n") |
|
562 | self.dirstate.update(new, "n") | |
559 | self.dirstate.forget(remove) |
|
563 | self.dirstate.forget(remove) | |
560 |
|
564 | |||
561 | self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2) |
|
565 | self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2) | |
562 | return n |
|
566 | return n | |
563 |
|
567 | |||
564 | def walk(self, node=None, files=[], match=util.always, badmatch=None): |
|
568 | def walk(self, node=None, files=[], match=util.always, badmatch=None): | |
565 | if node: |
|
569 | if node: | |
566 | fdict = dict.fromkeys(files) |
|
570 | fdict = dict.fromkeys(files) | |
567 | for fn in self.manifest.read(self.changelog.read(node)[0]): |
|
571 | for fn in self.manifest.read(self.changelog.read(node)[0]): | |
568 | fdict.pop(fn, None) |
|
572 | fdict.pop(fn, None) | |
569 | if match(fn): |
|
573 | if match(fn): | |
570 | yield 'm', fn |
|
574 | yield 'm', fn | |
571 | for fn in fdict: |
|
575 | for fn in fdict: | |
572 | if badmatch and badmatch(fn): |
|
576 | if badmatch and badmatch(fn): | |
573 | if match(fn): |
|
577 | if match(fn): | |
574 | yield 'b', fn |
|
578 | yield 'b', fn | |
575 | else: |
|
579 | else: | |
576 | self.ui.warn(_('%s: No such file in rev %s\n') % ( |
|
580 | self.ui.warn(_('%s: No such file in rev %s\n') % ( | |
577 | util.pathto(self.getcwd(), fn), short(node))) |
|
581 | util.pathto(self.getcwd(), fn), short(node))) | |
578 | else: |
|
582 | else: | |
579 | for src, fn in self.dirstate.walk(files, match, badmatch=badmatch): |
|
583 | for src, fn in self.dirstate.walk(files, match, badmatch=badmatch): | |
580 | yield src, fn |
|
584 | yield src, fn | |
581 |
|
585 | |||
582 | def changes(self, node1=None, node2=None, files=[], match=util.always, |
|
586 | def changes(self, node1=None, node2=None, files=[], match=util.always, | |
583 | wlock=None, show_ignored=None): |
|
587 | wlock=None, show_ignored=None): | |
584 | """return changes between two nodes or node and working directory |
|
588 | """return changes between two nodes or node and working directory | |
585 |
|
589 | |||
586 | If node1 is None, use the first dirstate parent instead. |
|
590 | If node1 is None, use the first dirstate parent instead. | |
587 | If node2 is None, compare node1 with working directory. |
|
591 | If node2 is None, compare node1 with working directory. | |
588 | """ |
|
592 | """ | |
589 |
|
593 | |||
590 | def fcmp(fn, mf): |
|
594 | def fcmp(fn, mf): | |
591 | t1 = self.wread(fn) |
|
595 | t1 = self.wread(fn) | |
592 | t2 = self.file(fn).read(mf.get(fn, nullid)) |
|
596 | t2 = self.file(fn).read(mf.get(fn, nullid)) | |
593 | return cmp(t1, t2) |
|
597 | return cmp(t1, t2) | |
594 |
|
598 | |||
595 | def mfmatches(node): |
|
599 | def mfmatches(node): | |
596 | change = self.changelog.read(node) |
|
600 | change = self.changelog.read(node) | |
597 | mf = dict(self.manifest.read(change[0])) |
|
601 | mf = dict(self.manifest.read(change[0])) | |
598 | for fn in mf.keys(): |
|
602 | for fn in mf.keys(): | |
599 | if not match(fn): |
|
603 | if not match(fn): | |
600 | del mf[fn] |
|
604 | del mf[fn] | |
601 | return mf |
|
605 | return mf | |
602 |
|
606 | |||
603 | if node1: |
|
607 | if node1: | |
604 | # read the manifest from node1 before the manifest from node2, |
|
608 | # read the manifest from node1 before the manifest from node2, | |
605 | # so that we'll hit the manifest cache if we're going through |
|
609 | # so that we'll hit the manifest cache if we're going through | |
606 | # all the revisions in parent->child order. |
|
610 | # all the revisions in parent->child order. | |
607 | mf1 = mfmatches(node1) |
|
611 | mf1 = mfmatches(node1) | |
608 |
|
612 | |||
609 | # are we comparing the working directory? |
|
613 | # are we comparing the working directory? | |
610 | if not node2: |
|
614 | if not node2: | |
611 | if not wlock: |
|
615 | if not wlock: | |
612 | try: |
|
616 | try: | |
613 | wlock = self.wlock(wait=0) |
|
617 | wlock = self.wlock(wait=0) | |
614 | except lock.LockException: |
|
618 | except lock.LockException: | |
615 | wlock = None |
|
619 | wlock = None | |
616 | lookup, modified, added, removed, deleted, unknown, ignored = ( |
|
620 | lookup, modified, added, removed, deleted, unknown, ignored = ( | |
617 | self.dirstate.changes(files, match, show_ignored)) |
|
621 | self.dirstate.changes(files, match, show_ignored)) | |
618 |
|
622 | |||
619 | # are we comparing working dir against its parent? |
|
623 | # are we comparing working dir against its parent? | |
620 | if not node1: |
|
624 | if not node1: | |
621 | if lookup: |
|
625 | if lookup: | |
622 | # do a full compare of any files that might have changed |
|
626 | # do a full compare of any files that might have changed | |
623 | mf2 = mfmatches(self.dirstate.parents()[0]) |
|
627 | mf2 = mfmatches(self.dirstate.parents()[0]) | |
624 | for f in lookup: |
|
628 | for f in lookup: | |
625 | if fcmp(f, mf2): |
|
629 | if fcmp(f, mf2): | |
626 | modified.append(f) |
|
630 | modified.append(f) | |
627 | elif wlock is not None: |
|
631 | elif wlock is not None: | |
628 | self.dirstate.update([f], "n") |
|
632 | self.dirstate.update([f], "n") | |
629 | else: |
|
633 | else: | |
630 | # we are comparing working dir against non-parent |
|
634 | # we are comparing working dir against non-parent | |
631 | # generate a pseudo-manifest for the working dir |
|
635 | # generate a pseudo-manifest for the working dir | |
632 | mf2 = mfmatches(self.dirstate.parents()[0]) |
|
636 | mf2 = mfmatches(self.dirstate.parents()[0]) | |
633 | for f in lookup + modified + added: |
|
637 | for f in lookup + modified + added: | |
634 | mf2[f] = "" |
|
638 | mf2[f] = "" | |
635 | for f in removed: |
|
639 | for f in removed: | |
636 | if f in mf2: |
|
640 | if f in mf2: | |
637 | del mf2[f] |
|
641 | del mf2[f] | |
638 | else: |
|
642 | else: | |
639 | # we are comparing two revisions |
|
643 | # we are comparing two revisions | |
640 | deleted, unknown, ignored = [], [], [] |
|
644 | deleted, unknown, ignored = [], [], [] | |
641 | mf2 = mfmatches(node2) |
|
645 | mf2 = mfmatches(node2) | |
642 |
|
646 | |||
643 | if node1: |
|
647 | if node1: | |
644 | # flush lists from dirstate before comparing manifests |
|
648 | # flush lists from dirstate before comparing manifests | |
645 | modified, added = [], [] |
|
649 | modified, added = [], [] | |
646 |
|
650 | |||
647 | for fn in mf2: |
|
651 | for fn in mf2: | |
648 | if mf1.has_key(fn): |
|
652 | if mf1.has_key(fn): | |
649 | if mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1)): |
|
653 | if mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1)): | |
650 | modified.append(fn) |
|
654 | modified.append(fn) | |
651 | del mf1[fn] |
|
655 | del mf1[fn] | |
652 | else: |
|
656 | else: | |
653 | added.append(fn) |
|
657 | added.append(fn) | |
654 |
|
658 | |||
655 | removed = mf1.keys() |
|
659 | removed = mf1.keys() | |
656 |
|
660 | |||
657 | # sort and return results: |
|
661 | # sort and return results: | |
658 | for l in modified, added, removed, deleted, unknown, ignored: |
|
662 | for l in modified, added, removed, deleted, unknown, ignored: | |
659 | l.sort() |
|
663 | l.sort() | |
660 | if show_ignored is None: |
|
664 | if show_ignored is None: | |
661 | return (modified, added, removed, deleted, unknown) |
|
665 | return (modified, added, removed, deleted, unknown) | |
662 | else: |
|
666 | else: | |
663 | return (modified, added, removed, deleted, unknown, ignored) |
|
667 | return (modified, added, removed, deleted, unknown, ignored) | |
664 |
|
668 | |||
665 | def add(self, list, wlock=None): |
|
669 | def add(self, list, wlock=None): | |
666 | if not wlock: |
|
670 | if not wlock: | |
667 | wlock = self.wlock() |
|
671 | wlock = self.wlock() | |
668 | for f in list: |
|
672 | for f in list: | |
669 | p = self.wjoin(f) |
|
673 | p = self.wjoin(f) | |
670 | if not os.path.exists(p): |
|
674 | if not os.path.exists(p): | |
671 | self.ui.warn(_("%s does not exist!\n") % f) |
|
675 | self.ui.warn(_("%s does not exist!\n") % f) | |
672 | elif not os.path.isfile(p): |
|
676 | elif not os.path.isfile(p): | |
673 | self.ui.warn(_("%s not added: only files supported currently\n") |
|
677 | self.ui.warn(_("%s not added: only files supported currently\n") | |
674 | % f) |
|
678 | % f) | |
675 | elif self.dirstate.state(f) in 'an': |
|
679 | elif self.dirstate.state(f) in 'an': | |
676 | self.ui.warn(_("%s already tracked!\n") % f) |
|
680 | self.ui.warn(_("%s already tracked!\n") % f) | |
677 | else: |
|
681 | else: | |
678 | self.dirstate.update([f], "a") |
|
682 | self.dirstate.update([f], "a") | |
679 |
|
683 | |||
680 | def forget(self, list, wlock=None): |
|
684 | def forget(self, list, wlock=None): | |
681 | if not wlock: |
|
685 | if not wlock: | |
682 | wlock = self.wlock() |
|
686 | wlock = self.wlock() | |
683 | for f in list: |
|
687 | for f in list: | |
684 | if self.dirstate.state(f) not in 'ai': |
|
688 | if self.dirstate.state(f) not in 'ai': | |
685 | self.ui.warn(_("%s not added!\n") % f) |
|
689 | self.ui.warn(_("%s not added!\n") % f) | |
686 | else: |
|
690 | else: | |
687 | self.dirstate.forget([f]) |
|
691 | self.dirstate.forget([f]) | |
688 |
|
692 | |||
689 | def remove(self, list, unlink=False, wlock=None): |
|
693 | def remove(self, list, unlink=False, wlock=None): | |
690 | if unlink: |
|
694 | if unlink: | |
691 | for f in list: |
|
695 | for f in list: | |
692 | try: |
|
696 | try: | |
693 | util.unlink(self.wjoin(f)) |
|
697 | util.unlink(self.wjoin(f)) | |
694 | except OSError, inst: |
|
698 | except OSError, inst: | |
695 | if inst.errno != errno.ENOENT: |
|
699 | if inst.errno != errno.ENOENT: | |
696 | raise |
|
700 | raise | |
697 | if not wlock: |
|
701 | if not wlock: | |
698 | wlock = self.wlock() |
|
702 | wlock = self.wlock() | |
699 | for f in list: |
|
703 | for f in list: | |
700 | p = self.wjoin(f) |
|
704 | p = self.wjoin(f) | |
701 | if os.path.exists(p): |
|
705 | if os.path.exists(p): | |
702 | self.ui.warn(_("%s still exists!\n") % f) |
|
706 | self.ui.warn(_("%s still exists!\n") % f) | |
703 | elif self.dirstate.state(f) == 'a': |
|
707 | elif self.dirstate.state(f) == 'a': | |
704 | self.dirstate.forget([f]) |
|
708 | self.dirstate.forget([f]) | |
705 | elif f not in self.dirstate: |
|
709 | elif f not in self.dirstate: | |
706 | self.ui.warn(_("%s not tracked!\n") % f) |
|
710 | self.ui.warn(_("%s not tracked!\n") % f) | |
707 | else: |
|
711 | else: | |
708 | self.dirstate.update([f], "r") |
|
712 | self.dirstate.update([f], "r") | |
709 |
|
713 | |||
710 | def undelete(self, list, wlock=None): |
|
714 | def undelete(self, list, wlock=None): | |
711 | p = self.dirstate.parents()[0] |
|
715 | p = self.dirstate.parents()[0] | |
712 | mn = self.changelog.read(p)[0] |
|
716 | mn = self.changelog.read(p)[0] | |
713 | mf = self.manifest.readflags(mn) |
|
717 | mf = self.manifest.readflags(mn) | |
714 | m = self.manifest.read(mn) |
|
718 | m = self.manifest.read(mn) | |
715 | if not wlock: |
|
719 | if not wlock: | |
716 | wlock = self.wlock() |
|
720 | wlock = self.wlock() | |
717 | for f in list: |
|
721 | for f in list: | |
718 | if self.dirstate.state(f) not in "r": |
|
722 | if self.dirstate.state(f) not in "r": | |
719 | self.ui.warn("%s not removed!\n" % f) |
|
723 | self.ui.warn("%s not removed!\n" % f) | |
720 | else: |
|
724 | else: | |
721 | t = self.file(f).read(m[f]) |
|
725 | t = self.file(f).read(m[f]) | |
722 | self.wwrite(f, t) |
|
726 | self.wwrite(f, t) | |
723 | util.set_exec(self.wjoin(f), mf[f]) |
|
727 | util.set_exec(self.wjoin(f), mf[f]) | |
724 | self.dirstate.update([f], "n") |
|
728 | self.dirstate.update([f], "n") | |
725 |
|
729 | |||
726 | def copy(self, source, dest, wlock=None): |
|
730 | def copy(self, source, dest, wlock=None): | |
727 | p = self.wjoin(dest) |
|
731 | p = self.wjoin(dest) | |
728 | if not os.path.exists(p): |
|
732 | if not os.path.exists(p): | |
729 | self.ui.warn(_("%s does not exist!\n") % dest) |
|
733 | self.ui.warn(_("%s does not exist!\n") % dest) | |
730 | elif not os.path.isfile(p): |
|
734 | elif not os.path.isfile(p): | |
731 | self.ui.warn(_("copy failed: %s is not a file\n") % dest) |
|
735 | self.ui.warn(_("copy failed: %s is not a file\n") % dest) | |
732 | else: |
|
736 | else: | |
733 | if not wlock: |
|
737 | if not wlock: | |
734 | wlock = self.wlock() |
|
738 | wlock = self.wlock() | |
735 | if self.dirstate.state(dest) == '?': |
|
739 | if self.dirstate.state(dest) == '?': | |
736 | self.dirstate.update([dest], "a") |
|
740 | self.dirstate.update([dest], "a") | |
737 | self.dirstate.copy(source, dest) |
|
741 | self.dirstate.copy(source, dest) | |
738 |
|
742 | |||
739 | def heads(self, start=None): |
|
743 | def heads(self, start=None): | |
740 | heads = self.changelog.heads(start) |
|
744 | heads = self.changelog.heads(start) | |
741 | # sort the output in rev descending order |
|
745 | # sort the output in rev descending order | |
742 | heads = [(-self.changelog.rev(h), h) for h in heads] |
|
746 | heads = [(-self.changelog.rev(h), h) for h in heads] | |
743 | heads.sort() |
|
747 | heads.sort() | |
744 | return [n for (r, n) in heads] |
|
748 | return [n for (r, n) in heads] | |
745 |
|
749 | |||
746 | # branchlookup returns a dict giving a list of branches for |
|
750 | # branchlookup returns a dict giving a list of branches for | |
747 | # each head. A branch is defined as the tag of a node or |
|
751 | # each head. A branch is defined as the tag of a node or | |
748 | # the branch of the node's parents. If a node has multiple |
|
752 | # the branch of the node's parents. If a node has multiple | |
749 | # branch tags, tags are eliminated if they are visible from other |
|
753 | # branch tags, tags are eliminated if they are visible from other | |
750 | # branch tags. |
|
754 | # branch tags. | |
751 | # |
|
755 | # | |
752 | # So, for this graph:  a->b->c->d->e |
|
756 | # So, for this graph:  a->b->c->d->e | |
753 | #                       \         / |
|
757 | #                       \         / | |
754 | #                        aa -----/ |
|
758 | #                        aa -----/ | |
755 | # a has tag 2.6.12 |
|
759 | # a has tag 2.6.12 | |
756 | # d has tag 2.6.13 |
|
760 | # d has tag 2.6.13 | |
757 | # e would have branch tags for 2.6.12 and 2.6.13. Because the node |
|
761 | # e would have branch tags for 2.6.12 and 2.6.13. Because the node | |
758 | # for 2.6.12 can be reached from the node 2.6.13, that is eliminated |
|
762 | # for 2.6.12 can be reached from the node 2.6.13, that is eliminated | |
759 | # from the list. |
|
763 | # from the list. | |
760 | # |
|
764 | # | |
761 | # It is possible that more than one head will have the same branch tag. |
|
765 | # It is possible that more than one head will have the same branch tag. | |
762 | # Callers need to check the result for multiple heads under the same |
|
766 | # Callers need to check the result for multiple heads under the same | |
763 | # branch tag if that is a problem for them (i.e. checkout of a specific |
|
767 | # branch tag if that is a problem for them (i.e. checkout of a specific | |
764 | # branch). |
|
768 | # branch). | |
765 | # |
|
769 | # | |
766 | # passing in a specific branch will limit the depth of the search |
|
770 | # passing in a specific branch will limit the depth of the search | |
767 | # through the parents. It won't limit the branches returned in the |
|
771 | # through the parents. It won't limit the branches returned in the | |
768 | # result though. |
|
772 | # result though. | |
769 | def branchlookup(self, heads=None, branch=None): |
|
773 | def branchlookup(self, heads=None, branch=None): | |
770 | if not heads: |
|
774 | if not heads: | |
771 | heads = self.heads() |
|
775 | heads = self.heads() | |
772 | headt = [ h for h in heads ] |
|
776 | headt = [ h for h in heads ] | |
773 | chlog = self.changelog |
|
777 | chlog = self.changelog | |
774 | branches = {} |
|
778 | branches = {} | |
775 | merges = [] |
|
779 | merges = [] | |
776 | seenmerge = {} |
|
780 | seenmerge = {} | |
777 |
|
781 | |||
778 | # traverse the tree once for each head, recording in the branches |
|
782 | # traverse the tree once for each head, recording in the branches | |
779 | # dict which tags are visible from this head. The branches |
|
783 | # dict which tags are visible from this head. The branches | |
780 | # dict also records which tags are visible from each tag |
|
784 | # dict also records which tags are visible from each tag | |
781 | # while we traverse. |
|
785 | # while we traverse. | |
782 | while headt or merges: |
|
786 | while headt or merges: | |
783 | if merges: |
|
787 | if merges: | |
784 | n, found = merges.pop() |
|
788 | n, found = merges.pop() | |
785 | visit = [n] |
|
789 | visit = [n] | |
786 | else: |
|
790 | else: | |
787 | h = headt.pop() |
|
791 | h = headt.pop() | |
788 | visit = [h] |
|
792 | visit = [h] | |
789 | found = [h] |
|
793 | found = [h] | |
790 | seen = {} |
|
794 | seen = {} | |
791 | while visit: |
|
795 | while visit: | |
792 | n = visit.pop() |
|
796 | n = visit.pop() | |
793 | if n in seen: |
|
797 | if n in seen: | |
794 | continue |
|
798 | continue | |
795 | pp = chlog.parents(n) |
|
799 | pp = chlog.parents(n) | |
796 | tags = self.nodetags(n) |
|
800 | tags = self.nodetags(n) | |
797 | if tags: |
|
801 | if tags: | |
798 | for x in tags: |
|
802 | for x in tags: | |
799 | if x == 'tip': |
|
803 | if x == 'tip': | |
800 | continue |
|
804 | continue | |
801 | for f in found: |
|
805 | for f in found: | |
802 | branches.setdefault(f, {})[n] = 1 |
|
806 | branches.setdefault(f, {})[n] = 1 | |
803 | branches.setdefault(n, {})[n] = 1 |
|
807 | branches.setdefault(n, {})[n] = 1 | |
804 | break |
|
808 | break | |
805 | if n not in found: |
|
809 | if n not in found: | |
806 | found.append(n) |
|
810 | found.append(n) | |
807 | if branch in tags: |
|
811 | if branch in tags: | |
808 | continue |
|
812 | continue | |
809 | seen[n] = 1 |
|
813 | seen[n] = 1 | |
810 | if pp[1] != nullid and n not in seenmerge: |
|
814 | if pp[1] != nullid and n not in seenmerge: | |
811 | merges.append((pp[1], [x for x in found])) |
|
815 | merges.append((pp[1], [x for x in found])) | |
812 | seenmerge[n] = 1 |
|
816 | seenmerge[n] = 1 | |
813 | if pp[0] != nullid: |
|
817 | if pp[0] != nullid: | |
814 | visit.append(pp[0]) |
|
818 | visit.append(pp[0]) | |
815 | # traverse the branches dict, eliminating branch tags from each |
|
819 | # traverse the branches dict, eliminating branch tags from each | |
816 | # head that are visible from another branch tag for that head. |
|
820 | # head that are visible from another branch tag for that head. | |
817 | out = {} |
|
821 | out = {} | |
818 | viscache = {} |
|
822 | viscache = {} | |
819 | for h in heads: |
|
823 | for h in heads: | |
820 | def visible(node): |
|
824 | def visible(node): | |
821 | if node in viscache: |
|
825 | if node in viscache: | |
822 | return viscache[node] |
|
826 | return viscache[node] | |
823 | ret = {} |
|
827 | ret = {} | |
824 | visit = [node] |
|
828 | visit = [node] | |
825 | while visit: |
|
829 | while visit: | |
826 | x = visit.pop() |
|
830 | x = visit.pop() | |
827 | if x in viscache: |
|
831 | if x in viscache: | |
828 | ret.update(viscache[x]) |
|
832 | ret.update(viscache[x]) | |
829 | elif x not in ret: |
|
833 | elif x not in ret: | |
830 | ret[x] = 1 |
|
834 | ret[x] = 1 | |
831 | if x in branches: |
|
835 | if x in branches: | |
832 | visit[len(visit):] = branches[x].keys() |
|
836 | visit[len(visit):] = branches[x].keys() | |
833 | viscache[node] = ret |
|
837 | viscache[node] = ret | |
834 | return ret |
|
838 | return ret | |
835 | if h not in branches: |
|
839 | if h not in branches: | |
836 | continue |
|
840 | continue | |
837 | # O(n^2), but somewhat limited. This only searches the |
|
841 | # O(n^2), but somewhat limited. This only searches the | |
838 | # tags visible from a specific head, not all the tags in the |
|
842 | # tags visible from a specific head, not all the tags in the | |
839 | # whole repo. |
|
843 | # whole repo. | |
840 | for b in branches[h]: |
|
844 | for b in branches[h]: | |
841 | vis = False |
|
845 | vis = False | |
842 | for bb in branches[h].keys(): |
|
846 | for bb in branches[h].keys(): | |
843 | if b != bb: |
|
847 | if b != bb: | |
844 | if b in visible(bb): |
|
848 | if b in visible(bb): | |
845 | vis = True |
|
849 | vis = True | |
846 | break |
|
850 | break | |
847 | if not vis: |
|
851 | if not vis: | |
848 | l = out.setdefault(h, []) |
|
852 | l = out.setdefault(h, []) | |
849 | l[len(l):] = self.nodetags(b) |
|
853 | l[len(l):] = self.nodetags(b) | |
850 | return out |
|
854 | return out | |
851 |
|
855 | |||
852 | def branches(self, nodes): |
|
856 | def branches(self, nodes): | |
853 | if not nodes: |
|
857 | if not nodes: | |
854 | nodes = [self.changelog.tip()] |
|
858 | nodes = [self.changelog.tip()] | |
855 | b = [] |
|
859 | b = [] | |
856 | for n in nodes: |
|
860 | for n in nodes: | |
857 | t = n |
|
861 | t = n | |
858 | while n: |
|
862 | while n: | |
859 | p = self.changelog.parents(n) |
|
863 | p = self.changelog.parents(n) | |
860 | if p[1] != nullid or p[0] == nullid: |
|
864 | if p[1] != nullid or p[0] == nullid: | |
861 | b.append((t, n, p[0], p[1])) |
|
865 | b.append((t, n, p[0], p[1])) | |
862 | break |
|
866 | break | |
863 | n = p[0] |
|
867 | n = p[0] | |
864 | return b |
|
868 | return b | |
865 |
|
869 | |||
866 | def between(self, pairs): |
|
870 | def between(self, pairs): | |
867 | r = [] |
|
871 | r = [] | |
868 |
|
872 | |||
869 | for top, bottom in pairs: |
|
873 | for top, bottom in pairs: | |
870 | n, l, i = top, [], 0 |
|
874 | n, l, i = top, [], 0 | |
871 | f = 1 |
|
875 | f = 1 | |
872 |
|
876 | |||
873 | while n != bottom: |
|
877 | while n != bottom: | |
874 | p = self.changelog.parents(n)[0] |
|
878 | p = self.changelog.parents(n)[0] | |
875 | if i == f: |
|
879 | if i == f: | |
876 | l.append(n) |
|
880 | l.append(n) | |
877 | f = f * 2 |
|
881 | f = f * 2 | |
878 | n = p |
|
882 | n = p | |
879 | i += 1 |
|
883 | i += 1 | |
880 |
|
884 | |||
881 | r.append(l) |
|
885 | r.append(l) | |
882 |
|
886 | |||
883 | return r |
|
887 | return r | |
884 |
|
888 | |||
885 | def findincoming(self, remote, base=None, heads=None, force=False): |
|
889 | def findincoming(self, remote, base=None, heads=None, force=False): | |
886 | m = self.changelog.nodemap |
|
890 | m = self.changelog.nodemap | |
887 | search = [] |
|
891 | search = [] | |
888 | fetch = {} |
|
892 | fetch = {} | |
889 | seen = {} |
|
893 | seen = {} | |
890 | seenbranch = {} |
|
894 | seenbranch = {} | |
891 | if base == None: |
|
895 | if base == None: | |
892 | base = {} |
|
896 | base = {} | |
893 |
|
897 | |||
894 | if not heads: |
|
898 | if not heads: | |
895 | heads = remote.heads() |
|
899 | heads = remote.heads() | |
896 |
|
900 | |||
897 | if self.changelog.tip() == nullid: |
|
901 | if self.changelog.tip() == nullid: | |
898 | if heads != [nullid]: |
|
902 | if heads != [nullid]: | |
899 | return [nullid] |
|
903 | return [nullid] | |
900 | return [] |
|
904 | return [] | |
901 |
|
905 | |||
902 | # assume we're closer to the tip than the root |
|
906 | # assume we're closer to the tip than the root | |
903 | # and start by examining the heads |
|
907 | # and start by examining the heads | |
904 | self.ui.status(_("searching for changes\n")) |
|
908 | self.ui.status(_("searching for changes\n")) | |
905 |
|
909 | |||
906 | unknown = [] |
|
910 | unknown = [] | |
907 | for h in heads: |
|
911 | for h in heads: | |
908 | if h not in m: |
|
912 | if h not in m: | |
909 | unknown.append(h) |
|
913 | unknown.append(h) | |
910 | else: |
|
914 | else: | |
911 | base[h] = 1 |
|
915 | base[h] = 1 | |
912 |
|
916 | |||
913 | if not unknown: |
|
917 | if not unknown: | |
914 | return [] |
|
918 | return [] | |
915 |
|
919 | |||
916 | rep = {} |
|
920 | rep = {} | |
917 | reqcnt = 0 |
|
921 | reqcnt = 0 | |
918 |
|
922 | |||
919 | # search through remote branches |
|
923 | # search through remote branches | |
920 | # a 'branch' here is a linear segment of history, with four parts: |
|
924 | # a 'branch' here is a linear segment of history, with four parts: | |
921 | # head, root, first parent, second parent |
|
925 | # head, root, first parent, second parent | |
922 | # (a branch always has two parents (or none) by definition) |
|
926 | # (a branch always has two parents (or none) by definition) | |
923 | unknown = remote.branches(unknown) |
|
927 | unknown = remote.branches(unknown) | |
924 | while unknown: |
|
928 | while unknown: | |
925 | r = [] |
|
929 | r = [] | |
926 | while unknown: |
|
930 | while unknown: | |
927 | n = unknown.pop(0) |
|
931 | n = unknown.pop(0) | |
928 | if n[0] in seen: |
|
932 | if n[0] in seen: | |
929 | continue |
|
933 | continue | |
930 |
|
934 | |||
931 | self.ui.debug(_("examining %s:%s\n") |
|
935 | self.ui.debug(_("examining %s:%s\n") | |
932 | % (short(n[0]), short(n[1]))) |
|
936 | % (short(n[0]), short(n[1]))) | |
933 | if n[0] == nullid: |
|
937 | if n[0] == nullid: | |
934 | break |
|
938 | break | |
935 | if n in seenbranch: |
|
939 | if n in seenbranch: | |
936 | self.ui.debug(_("branch already found\n")) |
|
940 | self.ui.debug(_("branch already found\n")) | |
937 | continue |
|
941 | continue | |
938 | if n[1] and n[1] in m: # do we know the base? |
|
942 | if n[1] and n[1] in m: # do we know the base? | |
939 | self.ui.debug(_("found incomplete branch %s:%s\n") |
|
943 | self.ui.debug(_("found incomplete branch %s:%s\n") | |
940 | % (short(n[0]), short(n[1]))) |
|
944 | % (short(n[0]), short(n[1]))) | |
941 | search.append(n) # schedule branch range for scanning |
|
945 | search.append(n) # schedule branch range for scanning | |
942 | seenbranch[n] = 1 |
|
946 | seenbranch[n] = 1 | |
943 | else: |
|
947 | else: | |
944 | if n[1] not in seen and n[1] not in fetch: |
|
948 | if n[1] not in seen and n[1] not in fetch: | |
945 | if n[2] in m and n[3] in m: |
|
949 | if n[2] in m and n[3] in m: | |
946 | self.ui.debug(_("found new changeset %s\n") % |
|
950 | self.ui.debug(_("found new changeset %s\n") % | |
947 | short(n[1])) |
|
951 | short(n[1])) | |
948 | fetch[n[1]] = 1 # earliest unknown |
|
952 | fetch[n[1]] = 1 # earliest unknown | |
949 | base[n[2]] = 1 # latest known |
|
953 | base[n[2]] = 1 # latest known | |
950 | continue |
|
954 | continue | |
951 |
|
955 | |||
952 | for a in n[2:4]: |
|
956 | for a in n[2:4]: | |
953 | if a not in rep: |
|
957 | if a not in rep: | |
954 | r.append(a) |
|
958 | r.append(a) | |
955 | rep[a] = 1 |
|
959 | rep[a] = 1 | |
956 |
|
960 | |||
957 | seen[n[0]] = 1 |
|
961 | seen[n[0]] = 1 | |
958 |
|
962 | |||
959 | if r: |
|
963 | if r: | |
960 | reqcnt += 1 |
|
964 | reqcnt += 1 | |
961 | self.ui.debug(_("request %d: %s\n") % |
|
965 | self.ui.debug(_("request %d: %s\n") % | |
962 | (reqcnt, " ".join(map(short, r)))) |
|
966 | (reqcnt, " ".join(map(short, r)))) | |
963 | for p in range(0, len(r), 10): |
|
967 | for p in range(0, len(r), 10): | |
964 | for b in remote.branches(r[p:p+10]): |
|
968 | for b in remote.branches(r[p:p+10]): | |
965 | self.ui.debug(_("received %s:%s\n") % |
|
969 | self.ui.debug(_("received %s:%s\n") % | |
966 | (short(b[0]), short(b[1]))) |
|
970 | (short(b[0]), short(b[1]))) | |
967 | if b[0] in m: |
|
971 | if b[0] in m: | |
968 | self.ui.debug(_("found base node %s\n") |
|
972 | self.ui.debug(_("found base node %s\n") | |
969 | % short(b[0])) |
|
973 | % short(b[0])) | |
970 | base[b[0]] = 1 |
|
974 | base[b[0]] = 1 | |
971 | elif b[0] not in seen: |
|
975 | elif b[0] not in seen: | |
972 | unknown.append(b) |
|
976 | unknown.append(b) | |
973 |
|
977 | |||
974 | # do binary search on the branches we found |
|
978 | # do binary search on the branches we found | |
975 | while search: |
|
979 | while search: | |
976 | n = search.pop(0) |
|
980 | n = search.pop(0) | |
977 | reqcnt += 1 |
|
981 | reqcnt += 1 | |
978 | l = remote.between([(n[0], n[1])])[0] |
|
982 | l = remote.between([(n[0], n[1])])[0] | |
979 | l.append(n[1]) |
|
983 | l.append(n[1]) | |
980 | p = n[0] |
|
984 | p = n[0] | |
981 | f = 1 |
|
985 | f = 1 | |
982 | for i in l: |
|
986 | for i in l: | |
983 | self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i))) |
|
987 | self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i))) | |
984 | if i in m: |
|
988 | if i in m: | |
985 | if f <= 2: |
|
989 | if f <= 2: | |
986 | self.ui.debug(_("found new branch changeset %s\n") % |
|
990 | self.ui.debug(_("found new branch changeset %s\n") % | |
987 | short(p)) |
|
991 | short(p)) | |
988 | fetch[p] = 1 |
|
992 | fetch[p] = 1 | |
989 | base[i] = 1 |
|
993 | base[i] = 1 | |
990 | else: |
|
994 | else: | |
991 | self.ui.debug(_("narrowed branch search to %s:%s\n") |
|
995 | self.ui.debug(_("narrowed branch search to %s:%s\n") | |
992 | % (short(p), short(i))) |
|
996 | % (short(p), short(i))) | |
993 | search.append((p, i)) |
|
997 | search.append((p, i)) | |
994 | break |
|
998 | break | |
995 | p, f = i, f * 2 |
|
999 | p, f = i, f * 2 | |
996 |
|
1000 | |||
997 | # sanity check our fetch list |
|
1001 | # sanity check our fetch list | |
998 | for f in fetch.keys(): |
|
1002 | for f in fetch.keys(): | |
999 | if f in m: |
|
1003 | if f in m: | |
1000 | raise repo.RepoError(_("already have changeset ") + short(f)) |
|
1004 | raise repo.RepoError(_("already have changeset ") + short(f)) | |
1001 |
|
1005 | |||
1002 | if base.keys() == [nullid]: |
|
1006 | if base.keys() == [nullid]: | |
1003 | if force: |
|
1007 | if force: | |
1004 | self.ui.warn(_("warning: repository is unrelated\n")) |
|
1008 | self.ui.warn(_("warning: repository is unrelated\n")) | |
1005 | else: |
|
1009 | else: | |
1006 | raise util.Abort(_("repository is unrelated")) |
|
1010 | raise util.Abort(_("repository is unrelated")) | |
1007 |
|
1011 | |||
1008 | self.ui.note(_("found new changesets starting at ") + |
|
1012 | self.ui.note(_("found new changesets starting at ") + | |
1009 | " ".join([short(f) for f in fetch]) + "\n") |
|
1013 | " ".join([short(f) for f in fetch]) + "\n") | |
1010 |
|
1014 | |||
1011 | self.ui.debug(_("%d total queries\n") % reqcnt) |
|
1015 | self.ui.debug(_("%d total queries\n") % reqcnt) | |
1012 |
|
1016 | |||
1013 | return fetch.keys() |
|
1017 | return fetch.keys() | |
1014 |
|
1018 | |||
1015 | def findoutgoing(self, remote, base=None, heads=None, force=False): |
|
1019 | def findoutgoing(self, remote, base=None, heads=None, force=False): | |
1016 | """Return list of nodes that are roots of subsets not in remote |
|
1020 | """Return list of nodes that are roots of subsets not in remote | |
1017 |
|
1021 | |||
1018 | If base dict is specified, assume that these nodes and their parents |
|
1022 | If base dict is specified, assume that these nodes and their parents | |
1019 | exist on the remote side. |
|
1023 | exist on the remote side. | |
1020 | If a list of heads is specified, return only nodes which are heads |
|
1024 | If a list of heads is specified, return only nodes which are heads | |
1021 | or ancestors of these heads, and return a second element which |
|
1025 | or ancestors of these heads, and return a second element which | |
1022 | contains all remote heads which get new children. |
|
1026 | contains all remote heads which get new children. | |
1023 | """ |
|
1027 | """ | |
1024 | if base == None: |
|
1028 | if base == None: | |
1025 | base = {} |
|
1029 | base = {} | |
1026 | self.findincoming(remote, base, heads, force=force) |
|
1030 | self.findincoming(remote, base, heads, force=force) | |
1027 |
|
1031 | |||
1028 | self.ui.debug(_("common changesets up to ") |
|
1032 | self.ui.debug(_("common changesets up to ") | |
1029 | + " ".join(map(short, base.keys())) + "\n") |
|
1033 | + " ".join(map(short, base.keys())) + "\n") | |
1030 |
|
1034 | |||
1031 | remain = dict.fromkeys(self.changelog.nodemap) |
|
1035 | remain = dict.fromkeys(self.changelog.nodemap) | |
1032 |
|
1036 | |||
1033 | # prune everything remote has from the tree |
|
1037 | # prune everything remote has from the tree | |
1034 | del remain[nullid] |
|
1038 | del remain[nullid] | |
1035 | remove = base.keys() |
|
1039 | remove = base.keys() | |
1036 | while remove: |
|
1040 | while remove: | |
1037 | n = remove.pop(0) |
|
1041 | n = remove.pop(0) | |
1038 | if n in remain: |
|
1042 | if n in remain: | |
1039 | del remain[n] |
|
1043 | del remain[n] | |
1040 | for p in self.changelog.parents(n): |
|
1044 | for p in self.changelog.parents(n): | |
1041 | remove.append(p) |
|
1045 | remove.append(p) | |
1042 |
|
1046 | |||
1043 | # find every node whose parents have been pruned |
|
1047 | # find every node whose parents have been pruned | |
1044 | subset = [] |
|
1048 | subset = [] | |
1045 | # find every remote head that will get new children |
|
1049 | # find every remote head that will get new children | |
1046 | updated_heads = {} |
|
1050 | updated_heads = {} | |
1047 | for n in remain: |
|
1051 | for n in remain: | |
1048 | p1, p2 = self.changelog.parents(n) |
|
1052 | p1, p2 = self.changelog.parents(n) | |
1049 | if p1 not in remain and p2 not in remain: |
|
1053 | if p1 not in remain and p2 not in remain: | |
1050 | subset.append(n) |
|
1054 | subset.append(n) | |
1051 | if heads: |
|
1055 | if heads: | |
1052 | if p1 in heads: |
|
1056 | if p1 in heads: | |
1053 | updated_heads[p1] = True |
|
1057 | updated_heads[p1] = True | |
1054 | if p2 in heads: |
|
1058 | if p2 in heads: | |
1055 | updated_heads[p2] = True |
|
1059 | updated_heads[p2] = True | |
1056 |
|
1060 | |||
1057 | # this is the set of all roots we have to push |
|
1061 | # this is the set of all roots we have to push | |
1058 | if heads: |
|
1062 | if heads: | |
1059 | return subset, updated_heads.keys() |
|
1063 | return subset, updated_heads.keys() | |
1060 | else: |
|
1064 | else: | |
1061 | return subset |
|
1065 | return subset | |
1062 |
|
1066 | |||
1063 | def pull(self, remote, heads=None, force=False): |
|
1067 | def pull(self, remote, heads=None, force=False): | |
1064 | l = self.lock() |
|
1068 | l = self.lock() | |
1065 |
|
1069 | |||
1066 | fetch = self.findincoming(remote, force=force) |
|
1070 | fetch = self.findincoming(remote, force=force) | |
1067 | if fetch == [nullid]: |
|
1071 | if fetch == [nullid]: | |
1068 | self.ui.status(_("requesting all changes\n")) |
|
1072 | self.ui.status(_("requesting all changes\n")) | |
1069 |
|
1073 | |||
1070 | if not fetch: |
|
1074 | if not fetch: | |
1071 | self.ui.status(_("no changes found\n")) |
|
1075 | self.ui.status(_("no changes found\n")) | |
1072 | return 0 |
|
1076 | return 0 | |
1073 |
|
1077 | |||
1074 | if heads is None: |
|
1078 | if heads is None: | |
1075 | cg = remote.changegroup(fetch, 'pull') |
|
1079 | cg = remote.changegroup(fetch, 'pull') | |
1076 | else: |
|
1080 | else: | |
1077 | cg = remote.changegroupsubset(fetch, heads, 'pull') |
|
1081 | cg = remote.changegroupsubset(fetch, heads, 'pull') | |
1078 | return self.addchangegroup(cg) |
|
1082 | return self.addchangegroup(cg) | |
1079 |
|
1083 | |||
1080 | def push(self, remote, force=False, revs=None): |
|
1084 | def push(self, remote, force=False, revs=None): | |
1081 | lock = remote.lock() |
|
1085 | lock = remote.lock() | |
1082 |
|
1086 | |||
1083 | base = {} |
|
1087 | base = {} | |
1084 | remote_heads = remote.heads() |
|
1088 | remote_heads = remote.heads() | |
1085 | inc = self.findincoming(remote, base, remote_heads, force=force) |
|
1089 | inc = self.findincoming(remote, base, remote_heads, force=force) | |
1086 | if not force and inc: |
|
1090 | if not force and inc: | |
1087 | self.ui.warn(_("abort: unsynced remote changes!\n")) |
|
1091 | self.ui.warn(_("abort: unsynced remote changes!\n")) | |
1088 | self.ui.status(_("(did you forget to sync?" |
|
1092 | self.ui.status(_("(did you forget to sync?" | |
1089 | " use push -f to force)\n")) |
|
1093 | " use push -f to force)\n")) | |
1090 | return 1 |
|
1094 | return 1 | |
1091 |
|
1095 | |||
1092 | update, updated_heads = self.findoutgoing(remote, base, remote_heads) |
|
1096 | update, updated_heads = self.findoutgoing(remote, base, remote_heads) | |
1093 | if revs is not None: |
|
1097 | if revs is not None: | |
1094 | msng_cl, bases, heads = self.changelog.nodesbetween(update, revs) |
|
1098 | msng_cl, bases, heads = self.changelog.nodesbetween(update, revs) | |
1095 | else: |
|
1099 | else: | |
1096 | bases, heads = update, self.changelog.heads() |
|
1100 | bases, heads = update, self.changelog.heads() | |
1097 |
|
1101 | |||
1098 | if not bases: |
|
1102 | if not bases: | |
1099 | self.ui.status(_("no changes found\n")) |
|
1103 | self.ui.status(_("no changes found\n")) | |
1100 | return 1 |
|
1104 | return 1 | |
1101 | elif not force: |
|
1105 | elif not force: | |
1102 | # FIXME: we don't properly detect creation of new heads; |
|
1106 | # FIXME: we don't properly detect creation of new heads; | |
1103 | # in the push -r case, assume the user knows what he's doing |
|
1107 | # in the push -r case, assume the user knows what he's doing | |
1104 | if not revs and len(remote_heads) < len(heads) \ |
|
1108 | if not revs and len(remote_heads) < len(heads) \ | |
1105 | and remote_heads != [nullid]: |
|
1109 | and remote_heads != [nullid]: | |
1106 | self.ui.warn(_("abort: push creates new remote branches!\n")) |
|
1110 | self.ui.warn(_("abort: push creates new remote branches!\n")) | |
1107 | self.ui.status(_("(did you forget to merge?" |
|
1111 | self.ui.status(_("(did you forget to merge?" | |
1108 | " use push -f to force)\n")) |
|
1112 | " use push -f to force)\n")) | |
1109 | return 1 |
|
1113 | return 1 | |
1110 |
|
1114 | |||
1111 | if revs is None: |
|
1115 | if revs is None: | |
1112 | cg = self.changegroup(update, 'push') |
|
1116 | cg = self.changegroup(update, 'push') | |
1113 | else: |
|
1117 | else: | |
1114 | cg = self.changegroupsubset(update, revs, 'push') |
|
1118 | cg = self.changegroupsubset(update, revs, 'push') | |
1115 | return remote.addchangegroup(cg) |
|
1119 | return remote.addchangegroup(cg) | |
1116 |
|
1120 | |||
1117 | def changegroupsubset(self, bases, heads, source): |
|
1121 | def changegroupsubset(self, bases, heads, source): | |
1118 | """This function generates a changegroup consisting of all the nodes |
|
1122 | """This function generates a changegroup consisting of all the nodes | |
1119 | that are descendants of any of the bases, and ancestors of any of |
|
1123 | that are descendants of any of the bases, and ancestors of any of | |
1120 | the heads. |
|
1124 | the heads. | |
1121 |
|
1125 | |||
1122 | It is fairly complex as determining which filenodes and which |
|
1126 | It is fairly complex as determining which filenodes and which | |
1123 | manifest nodes need to be included for the changeset to be complete |
|
1127 | manifest nodes need to be included for the changeset to be complete | |
1124 | is non-trivial. |
|
1128 | is non-trivial. | |
1125 |
|
1129 | |||
1126 | Another wrinkle is doing the reverse, figuring out which changeset in |
|
1130 | Another wrinkle is doing the reverse, figuring out which changeset in | |
1127 | the changegroup a particular filenode or manifestnode belongs to.""" |
|
1131 | the changegroup a particular filenode or manifestnode belongs to.""" | |
1128 |
|
1132 | |||
1129 | self.hook('preoutgoing', throw=True, source=source) |
|
1133 | self.hook('preoutgoing', throw=True, source=source) | |
1130 |
|
1134 | |||
1131 | # Set up some initial variables |
|
1135 | # Set up some initial variables | |
1132 | # Make it easy to refer to self.changelog |
|
1136 | # Make it easy to refer to self.changelog | |
1133 | cl = self.changelog |
|
1137 | cl = self.changelog | |
1134 | # msng is short for missing - compute the list of changesets in this |
|
1138 | # msng is short for missing - compute the list of changesets in this | |
1135 | # changegroup. |
|
1139 | # changegroup. | |
1136 | msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads) |
|
1140 | msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads) | |
1137 | # Some bases may turn out to be superfluous, and some heads may be |
|
1141 | # Some bases may turn out to be superfluous, and some heads may be | |
1138 | # too. nodesbetween will return the minimal set of bases and heads |
|
1142 | # too. nodesbetween will return the minimal set of bases and heads | |
1139 | # necessary to re-create the changegroup. |
|
1143 | # necessary to re-create the changegroup. | |
1140 |
|
1144 | |||
1141 | # Known heads are the list of heads that it is assumed the recipient |
|
1145 | # Known heads are the list of heads that it is assumed the recipient | |
1142 | # of this changegroup will know about. |
|
1146 | # of this changegroup will know about. | |
1143 | knownheads = {} |
|
1147 | knownheads = {} | |
1144 | # We assume that all parents of bases are known heads. |
|
1148 | # We assume that all parents of bases are known heads. | |
1145 | for n in bases: |
|
1149 | for n in bases: | |
1146 | for p in cl.parents(n): |
|
1150 | for p in cl.parents(n): | |
1147 | if p != nullid: |
|
1151 | if p != nullid: | |
1148 | knownheads[p] = 1 |
|
1152 | knownheads[p] = 1 | |
1149 | knownheads = knownheads.keys() |
|
1153 | knownheads = knownheads.keys() | |
1150 | if knownheads: |
|
1154 | if knownheads: | |
1151 | # Now that we know what heads are known, we can compute which |
|
1155 | # Now that we know what heads are known, we can compute which | |
1152 | # changesets are known. The recipient must know about all |
|
1156 | # changesets are known. The recipient must know about all | |
1153 | # changesets required to reach the known heads from the null |
|
1157 | # changesets required to reach the known heads from the null | |
1154 | # changeset. |
|
1158 | # changeset. | |
1155 | has_cl_set, junk, junk = cl.nodesbetween(None, knownheads) |
|
1159 | has_cl_set, junk, junk = cl.nodesbetween(None, knownheads) | |
1156 | junk = None |
|
1160 | junk = None | |
1157 | # Transform the list into an ersatz set. |
|
1161 | # Transform the list into an ersatz set. | |
1158 | has_cl_set = dict.fromkeys(has_cl_set) |
|
1162 | has_cl_set = dict.fromkeys(has_cl_set) | |
1159 | else: |
|
1163 | else: | |
1160 | # If there were no known heads, the recipient cannot be assumed to |
|
1164 | # If there were no known heads, the recipient cannot be assumed to | |
1161 | # know about any changesets. |
|
1165 | # know about any changesets. | |
1162 | has_cl_set = {} |
|
1166 | has_cl_set = {} | |
1163 |
|
1167 | |||
1164 | # Make it easy to refer to self.manifest |
|
1168 | # Make it easy to refer to self.manifest | |
1165 | mnfst = self.manifest |
|
1169 | mnfst = self.manifest | |
1166 | # We don't know which manifests are missing yet |
|
1170 | # We don't know which manifests are missing yet | |
1167 | msng_mnfst_set = {} |
|
1171 | msng_mnfst_set = {} | |
1168 | # Nor do we know which filenodes are missing. |
|
1172 | # Nor do we know which filenodes are missing. | |
1169 | msng_filenode_set = {} |
|
1173 | msng_filenode_set = {} | |
1170 |
|
1174 | |||
1171 | junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex |
|
1175 | junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex | |
1172 | junk = None |
|
1176 | junk = None | |
1173 |
|
1177 | |||
1174 | # A changeset always belongs to itself, so the changenode lookup |
|
1178 | # A changeset always belongs to itself, so the changenode lookup | |
1175 | # function for a changenode is identity. |
|
1179 | # function for a changenode is identity. | |
1176 | def identity(x): |
|
1180 | def identity(x): | |
1177 | return x |
|
1181 | return x | |
1178 |
|
1182 | |||
1179 | # A function generating function. Sets up an environment for the |
|
1183 | # A function generating function. Sets up an environment for the | |
1180 | # inner function. |
|
1184 | # inner function. | |
1181 | def cmp_by_rev_func(revlog): |
|
1185 | def cmp_by_rev_func(revlog): | |
1182 | # Compare two nodes by their revision number in the environment's |
|
1186 | # Compare two nodes by their revision number in the environment's | |
1183 | # revision history. Since the revision number both represents the |
|
1187 | # revision history. Since the revision number both represents the | |
1184 | # most efficient order to read the nodes in, and represents a |
|
1188 | # most efficient order to read the nodes in, and represents a | |
1185 | # topological sorting of the nodes, this function is often useful. |
|
1189 | # topological sorting of the nodes, this function is often useful. | |
1186 | def cmp_by_rev(a, b): |
|
1190 | def cmp_by_rev(a, b): | |
1187 | return cmp(revlog.rev(a), revlog.rev(b)) |
|
1191 | return cmp(revlog.rev(a), revlog.rev(b)) | |
1188 | return cmp_by_rev |
|
1192 | return cmp_by_rev | |
1189 |
|
1193 | |||
1190 | # If we determine that a particular file or manifest node must be a |
|
1194 | # If we determine that a particular file or manifest node must be a | |
1191 | # node that the recipient of the changegroup will already have, we can |
|
1195 | # node that the recipient of the changegroup will already have, we can | |
1192 | # also assume the recipient will have all the parents. This function |
|
1196 | # also assume the recipient will have all the parents. This function | |
1193 | # prunes them from the set of missing nodes. |
|
1197 | # prunes them from the set of missing nodes. | |
1194 | def prune_parents(revlog, hasset, msngset): |
|
1198 | def prune_parents(revlog, hasset, msngset): | |
1195 | haslst = hasset.keys() |
|
1199 | haslst = hasset.keys() | |
1196 | haslst.sort(cmp_by_rev_func(revlog)) |
|
1200 | haslst.sort(cmp_by_rev_func(revlog)) | |
1197 | for node in haslst: |
|
1201 | for node in haslst: | |
1198 | parentlst = [p for p in revlog.parents(node) if p != nullid] |
|
1202 | parentlst = [p for p in revlog.parents(node) if p != nullid] | |
1199 | while parentlst: |
|
1203 | while parentlst: | |
1200 | n = parentlst.pop() |
|
1204 | n = parentlst.pop() | |
1201 | if n not in hasset: |
|
1205 | if n not in hasset: | |
1202 | hasset[n] = 1 |
|
1206 | hasset[n] = 1 | |
1203 | p = [p for p in revlog.parents(n) if p != nullid] |
|
1207 | p = [p for p in revlog.parents(n) if p != nullid] | |
1204 | parentlst.extend(p) |
|
1208 | parentlst.extend(p) | |
1205 | for n in hasset: |
|
1209 | for n in hasset: | |
1206 | msngset.pop(n, None) |
|
1210 | msngset.pop(n, None) | |
1207 |
|
1211 | |||
1208 | # This is a function generating function used to set up an environment |
|
1212 | # This is a function generating function used to set up an environment | |
1209 | # for the inner function to execute in. |
|
1213 | # for the inner function to execute in. | |
1210 | def manifest_and_file_collector(changedfileset): |
|
1214 | def manifest_and_file_collector(changedfileset): | |
1211 | # This is an information gathering function that gathers |
|
1215 | # This is an information gathering function that gathers | |
1212 | # information from each changeset node that goes out as part of |
|
1216 | # information from each changeset node that goes out as part of | |
1213 | # the changegroup. The information gathered is a list of which |
|
1217 | # the changegroup. The information gathered is a list of which | |
1214 | # manifest nodes are potentially required (the recipient may |
|
1218 | # manifest nodes are potentially required (the recipient may | |
1215 | # already have them) and total list of all files which were |
|
1219 | # already have them) and total list of all files which were | |
1216 | # changed in any changeset in the changegroup. |
|
1220 | # changed in any changeset in the changegroup. | |
1217 | # |
|
1221 | # | |
1218 | # We also remember the first changenode we saw any manifest |
|
1222 | # We also remember the first changenode we saw any manifest | |
1219 | # referenced by so we can later determine which changenode 'owns' |
|
1223 | # referenced by so we can later determine which changenode 'owns' | |
1220 | # the manifest. |
|
1224 | # the manifest. | |
1221 | def collect_manifests_and_files(clnode): |
|
1225 | def collect_manifests_and_files(clnode): | |
1222 | c = cl.read(clnode) |
|
1226 | c = cl.read(clnode) | |
1223 | for f in c[3]: |
|
1227 | for f in c[3]: | |
1224 | # This is to make sure we only have one instance of each |
|
1228 | # This is to make sure we only have one instance of each | |
1225 | # filename string for each filename. |
|
1229 | # filename string for each filename. | |
1226 | changedfileset.setdefault(f, f) |
|
1230 | changedfileset.setdefault(f, f) | |
1227 | msng_mnfst_set.setdefault(c[0], clnode) |
|
1231 | msng_mnfst_set.setdefault(c[0], clnode) | |
1228 | return collect_manifests_and_files |
|
1232 | return collect_manifests_and_files | |
1229 |
|
1233 | |||
1230 | # Figure out which manifest nodes (of the ones we think might be part |
|
1234 | # Figure out which manifest nodes (of the ones we think might be part | |
1231 | # of the changegroup) the recipient must know about and remove them |
|
1235 | # of the changegroup) the recipient must know about and remove them | |
1232 | # from the changegroup. |
|
1236 | # from the changegroup. | |
1233 | def prune_manifests(): |
|
1237 | def prune_manifests(): | |
1234 | has_mnfst_set = {} |
|
1238 | has_mnfst_set = {} | |
1235 | for n in msng_mnfst_set: |
|
1239 | for n in msng_mnfst_set: | |
1236 | # If a 'missing' manifest thinks it belongs to a changenode |
|
1240 | # If a 'missing' manifest thinks it belongs to a changenode | |
1237 | # the recipient is assumed to have, obviously the recipient |
|
1241 | # the recipient is assumed to have, obviously the recipient | |
1238 | # must have that manifest. |
|
1242 | # must have that manifest. | |
1239 | linknode = cl.node(mnfst.linkrev(n)) |
|
1243 | linknode = cl.node(mnfst.linkrev(n)) | |
1240 | if linknode in has_cl_set: |
|
1244 | if linknode in has_cl_set: | |
1241 | has_mnfst_set[n] = 1 |
|
1245 | has_mnfst_set[n] = 1 | |
1242 | prune_parents(mnfst, has_mnfst_set, msng_mnfst_set) |
|
1246 | prune_parents(mnfst, has_mnfst_set, msng_mnfst_set) | |
1243 |
|
1247 | |||
1244 | # Use the information collected in collect_manifests_and_files to say |
|
1248 | # Use the information collected in collect_manifests_and_files to say | |
1245 | # which changenode any manifestnode belongs to. |
|
1249 | # which changenode any manifestnode belongs to. | |
1246 | def lookup_manifest_link(mnfstnode): |
|
1250 | def lookup_manifest_link(mnfstnode): | |
1247 | return msng_mnfst_set[mnfstnode] |
|
1251 | return msng_mnfst_set[mnfstnode] | |
1248 |
|
1252 | |||
1249 | # A function generating function that sets up the initial environment |
|
1253 | # A function generating function that sets up the initial environment | |
1250 | # for the inner function. |
|
1254 | # for the inner function. | |
1251 | def filenode_collector(changedfiles): |
|
1255 | def filenode_collector(changedfiles): | |
1252 | next_rev = [0] |
|
1256 | next_rev = [0] | |
1253 | # This gathers information from each manifestnode included in the |
|
1257 | # This gathers information from each manifestnode included in the | |
1254 | # changegroup about which filenodes the manifest node references |
|
1258 | # changegroup about which filenodes the manifest node references | |
1255 | # so we can include those in the changegroup too. |
|
1259 | # so we can include those in the changegroup too. | |
1256 | # |
|
1260 | # | |
1257 | # It also remembers which changenode each filenode belongs to. It |
|
1261 | # It also remembers which changenode each filenode belongs to. It | |
1258 | # does this by assuming that a filenode belongs to the changenode |
|
1262 | # does this by assuming that a filenode belongs to the changenode | |
1259 | # the first manifest that references it belongs to. |
|
1263 | # the first manifest that references it belongs to. | |
1260 | def collect_msng_filenodes(mnfstnode): |
|
1264 | def collect_msng_filenodes(mnfstnode): | |
1261 | r = mnfst.rev(mnfstnode) |
|
1265 | r = mnfst.rev(mnfstnode) | |
1262 | if r == next_rev[0]: |
|
1266 | if r == next_rev[0]: | |
1263 | # If the last rev we looked at was the one just previous, |
|
1267 | # If the last rev we looked at was the one just previous, | |
1264 | # we only need to see a diff. |
|
1268 | # we only need to see a diff. | |
1265 | delta = mdiff.patchtext(mnfst.delta(mnfstnode)) |
|
1269 | delta = mdiff.patchtext(mnfst.delta(mnfstnode)) | |
1266 | # For each line in the delta |
|
1270 | # For each line in the delta | |
1267 | for dline in delta.splitlines(): |
|
1271 | for dline in delta.splitlines(): | |
1268 | # get the filename and filenode for that line |
|
1272 | # get the filename and filenode for that line | |
1269 | f, fnode = dline.split('\0') |
|
1273 | f, fnode = dline.split('\0') | |
1270 | fnode = bin(fnode[:40]) |
|
1274 | fnode = bin(fnode[:40]) | |
1271 | f = changedfiles.get(f, None) |
|
1275 | f = changedfiles.get(f, None) | |
1272 | # And if the file is in the list of files we care |
|
1276 | # And if the file is in the list of files we care | |
1273 | # about. |
|
1277 | # about. | |
1274 | if f is not None: |
|
1278 | if f is not None: | |
1275 | # Get the changenode this manifest belongs to |
|
1279 | # Get the changenode this manifest belongs to | |
1276 | clnode = msng_mnfst_set[mnfstnode] |
|
1280 | clnode = msng_mnfst_set[mnfstnode] | |
1277 | # Create the set of filenodes for the file if |
|
1281 | # Create the set of filenodes for the file if | |
1278 | # there isn't one already. |
|
1282 | # there isn't one already. | |
1279 | ndset = msng_filenode_set.setdefault(f, {}) |
|
1283 | ndset = msng_filenode_set.setdefault(f, {}) | |
1280 | # And set the filenode's changelog node to the |
|
1284 | # And set the filenode's changelog node to the | |
1281 | # manifest's if it hasn't been set already. |
|
1285 | # manifest's if it hasn't been set already. | |
1282 | ndset.setdefault(fnode, clnode) |
|
1286 | ndset.setdefault(fnode, clnode) | |
1283 | else: |
|
1287 | else: | |
1284 | # Otherwise we need a full manifest. |
|
1288 | # Otherwise we need a full manifest. | |
1285 | m = mnfst.read(mnfstnode) |
|
1289 | m = mnfst.read(mnfstnode) | |
1286 | # For every file we care about. |
|
1290 | # For every file we care about. | |
1287 | for f in changedfiles: |
|
1291 | for f in changedfiles: | |
1288 | fnode = m.get(f, None) |
|
1292 | fnode = m.get(f, None) | |
1289 | # If it's in the manifest |
|
1293 | # If it's in the manifest | |
1290 | if fnode is not None: |
|
1294 | if fnode is not None: | |
1291 | # See comments above. |
|
1295 | # See comments above. | |
1292 | clnode = msng_mnfst_set[mnfstnode] |
|
1296 | clnode = msng_mnfst_set[mnfstnode] | |
1293 | ndset = msng_filenode_set.setdefault(f, {}) |
|
1297 | ndset = msng_filenode_set.setdefault(f, {}) | |
1294 | ndset.setdefault(fnode, clnode) |
|
1298 | ndset.setdefault(fnode, clnode) | |
1295 | # Remember the revision we hope to see next. |
|
1299 | # Remember the revision we hope to see next. | |
1296 | next_rev[0] = r + 1 |
|
1300 | next_rev[0] = r + 1 | |
1297 | return collect_msng_filenodes |
|
1301 | return collect_msng_filenodes | |
1298 |
|
1302 | |||
1299 | # We have a list of filenodes we think we need for a file, let's remove |
|
1303 | # We have a list of filenodes we think we need for a file, let's remove | |
1300 | # all those we know the recipient must have. |
|
1304 | # all those we know the recipient must have. | |
1301 | def prune_filenodes(f, filerevlog): |
|
1305 | def prune_filenodes(f, filerevlog): | |
1302 | msngset = msng_filenode_set[f] |
|
1306 | msngset = msng_filenode_set[f] | |
1303 | hasset = {} |
|
1307 | hasset = {} | |
1304 | # If a 'missing' filenode thinks it belongs to a changenode we |
|
1308 | # If a 'missing' filenode thinks it belongs to a changenode we | |
1305 | # assume the recipient must have, then the recipient must have |
|
1309 | # assume the recipient must have, then the recipient must have | |
1306 | # that filenode. |
|
1310 | # that filenode. | |
1307 | for n in msngset: |
|
1311 | for n in msngset: | |
1308 | clnode = cl.node(filerevlog.linkrev(n)) |
|
1312 | clnode = cl.node(filerevlog.linkrev(n)) | |
1309 | if clnode in has_cl_set: |
|
1313 | if clnode in has_cl_set: | |
1310 | hasset[n] = 1 |
|
1314 | hasset[n] = 1 | |
1311 | prune_parents(filerevlog, hasset, msngset) |
|
1315 | prune_parents(filerevlog, hasset, msngset) | |
1312 |
|
1316 | |||
1313 | # A function generating function that sets up a context for the |
|
1317 | # A function generating function that sets up a context for the | |
1314 | # inner function. |
|
1318 | # inner function. | |
1315 | def lookup_filenode_link_func(fname): |
|
1319 | def lookup_filenode_link_func(fname): | |
1316 | msngset = msng_filenode_set[fname] |
|
1320 | msngset = msng_filenode_set[fname] | |
1317 | # Lookup the changenode the filenode belongs to. |
|
1321 | # Lookup the changenode the filenode belongs to. | |
1318 | def lookup_filenode_link(fnode): |
|
1322 | def lookup_filenode_link(fnode): | |
1319 | return msngset[fnode] |
|
1323 | return msngset[fnode] | |
1320 | return lookup_filenode_link |
|
1324 | return lookup_filenode_link | |
1321 |
|
1325 | |||
1322 | # Now that we have all these utility functions to help out and |
|
1326 | # Now that we have all these utility functions to help out and | |
1323 | # logically divide up the task, generate the group. |
|
1327 | # logically divide up the task, generate the group. | |
1324 | def gengroup(): |
|
1328 | def gengroup(): | |
1325 | # The set of changed files starts empty. |
|
1329 | # The set of changed files starts empty. | |
1326 | changedfiles = {} |
|
1330 | changedfiles = {} | |
1327 | # Create a changenode group generator that will call our functions |
|
1331 | # Create a changenode group generator that will call our functions | |
1328 | # back to lookup the owning changenode and collect information. |
|
1332 | # back to lookup the owning changenode and collect information. | |
1329 | group = cl.group(msng_cl_lst, identity, |
|
1333 | group = cl.group(msng_cl_lst, identity, | |
1330 | manifest_and_file_collector(changedfiles)) |
|
1334 | manifest_and_file_collector(changedfiles)) | |
1331 | for chnk in group: |
|
1335 | for chnk in group: | |
1332 | yield chnk |
|
1336 | yield chnk | |
1333 |
|
1337 | |||
1334 | # The list of manifests has been collected by the generator |
|
1338 | # The list of manifests has been collected by the generator | |
1335 | # calling our functions back. |
|
1339 | # calling our functions back. | |
1336 | prune_manifests() |
|
1340 | prune_manifests() | |
1337 | msng_mnfst_lst = msng_mnfst_set.keys() |
|
1341 | msng_mnfst_lst = msng_mnfst_set.keys() | |
1338 | # Sort the manifestnodes by revision number. |
|
1342 | # Sort the manifestnodes by revision number. | |
1339 | msng_mnfst_lst.sort(cmp_by_rev_func(mnfst)) |
|
1343 | msng_mnfst_lst.sort(cmp_by_rev_func(mnfst)) | |
1340 | # Create a generator for the manifestnodes that calls our lookup |
|
1344 | # Create a generator for the manifestnodes that calls our lookup | |
1341 | # and data collection functions back. |
|
1345 | # and data collection functions back. | |
1342 | group = mnfst.group(msng_mnfst_lst, lookup_manifest_link, |
|
1346 | group = mnfst.group(msng_mnfst_lst, lookup_manifest_link, | |
1343 | filenode_collector(changedfiles)) |
|
1347 | filenode_collector(changedfiles)) | |
1344 | for chnk in group: |
|
1348 | for chnk in group: | |
1345 | yield chnk |
|
1349 | yield chnk | |
1346 |
|
1350 | |||
1347 | # These are no longer needed, dereference and toss the memory for |
|
1351 | # These are no longer needed, dereference and toss the memory for | |
1348 | # them. |
|
1352 | # them. | |
1349 | msng_mnfst_lst = None |
|
1353 | msng_mnfst_lst = None | |
1350 | msng_mnfst_set.clear() |
|
1354 | msng_mnfst_set.clear() | |
1351 |
|
1355 | |||
1352 | changedfiles = changedfiles.keys() |
|
1356 | changedfiles = changedfiles.keys() | |
1353 | changedfiles.sort() |
|
1357 | changedfiles.sort() | |
1354 | # Go through all our files in order sorted by name. |
|
1358 | # Go through all our files in order sorted by name. | |
1355 | for fname in changedfiles: |
|
1359 | for fname in changedfiles: | |
1356 | filerevlog = self.file(fname) |
|
1360 | filerevlog = self.file(fname) | |
1357 | # Toss out the filenodes that the recipient isn't really |
|
1361 | # Toss out the filenodes that the recipient isn't really | |
1358 | # missing. |
|
1362 | # missing. | |
1359 | if msng_filenode_set.has_key(fname): |
|
1363 | if msng_filenode_set.has_key(fname): | |
1360 | prune_filenodes(fname, filerevlog) |
|
1364 | prune_filenodes(fname, filerevlog) | |
1361 | msng_filenode_lst = msng_filenode_set[fname].keys() |
|
1365 | msng_filenode_lst = msng_filenode_set[fname].keys() | |
1362 | else: |
|
1366 | else: | |
1363 | msng_filenode_lst = [] |
|
1367 | msng_filenode_lst = [] | |
1364 | # If any filenodes are left, generate the group for them, |
|
1368 | # If any filenodes are left, generate the group for them, | |
1365 | # otherwise don't bother. |
|
1369 | # otherwise don't bother. | |
1366 | if len(msng_filenode_lst) > 0: |
|
1370 | if len(msng_filenode_lst) > 0: | |
1367 | yield changegroup.genchunk(fname) |
|
1371 | yield changegroup.genchunk(fname) | |
1368 | # Sort the filenodes by their revision # |
|
1372 | # Sort the filenodes by their revision # | |
1369 | msng_filenode_lst.sort(cmp_by_rev_func(filerevlog)) |
|
1373 | msng_filenode_lst.sort(cmp_by_rev_func(filerevlog)) | |
1370 | # Create a group generator and only pass in a changenode |
|
1374 | # Create a group generator and only pass in a changenode | |
1371 | # lookup function as we need to collect no information |
|
1375 | # lookup function as we need to collect no information | |
1372 | # from filenodes. |
|
1376 | # from filenodes. | |
1373 | group = filerevlog.group(msng_filenode_lst, |
|
1377 | group = filerevlog.group(msng_filenode_lst, | |
1374 | lookup_filenode_link_func(fname)) |
|
1378 | lookup_filenode_link_func(fname)) | |
1375 | for chnk in group: |
|
1379 | for chnk in group: | |
1376 | yield chnk |
|
1380 | yield chnk | |
1377 | if msng_filenode_set.has_key(fname): |
|
1381 | if msng_filenode_set.has_key(fname): | |
1378 | # Don't need this anymore, toss it to free memory. |
|
1382 | # Don't need this anymore, toss it to free memory. | |
1379 | del msng_filenode_set[fname] |
|
1383 | del msng_filenode_set[fname] | |
1380 | # Signal that no more groups are left. |
|
1384 | # Signal that no more groups are left. | |
1381 | yield changegroup.closechunk() |
|
1385 | yield changegroup.closechunk() | |
1382 |
|
1386 | |||
1383 | if msng_cl_lst: |
|
1387 | if msng_cl_lst: | |
1384 | self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source) |
|
1388 | self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source) | |
1385 |
|
1389 | |||
1386 | return util.chunkbuffer(gengroup()) |
|
1390 | return util.chunkbuffer(gengroup()) | |
1387 |
|
1391 | |||
1388 | def changegroup(self, basenodes, source): |
|
1392 | def changegroup(self, basenodes, source): | |
1389 | """Generate a changegroup of all nodes that we have that a recipient |
|
1393 | """Generate a changegroup of all nodes that we have that a recipient | |
1390 | doesn't. |
|
1394 | doesn't. | |
1391 |
|
1395 | |||
1392 | This is much easier than the previous function as we can assume that |
|
1396 | This is much easier than the previous function as we can assume that | |
1393 | the recipient has any changenode we aren't sending them.""" |
|
1397 | the recipient has any changenode we aren't sending them.""" | |
1394 |
|
1398 | |||
1395 | self.hook('preoutgoing', throw=True, source=source) |
|
1399 | self.hook('preoutgoing', throw=True, source=source) | |
1396 |
|
1400 | |||
1397 | cl = self.changelog |
|
1401 | cl = self.changelog | |
1398 | nodes = cl.nodesbetween(basenodes, None)[0] |
|
1402 | nodes = cl.nodesbetween(basenodes, None)[0] | |
1399 | revset = dict.fromkeys([cl.rev(n) for n in nodes]) |
|
1403 | revset = dict.fromkeys([cl.rev(n) for n in nodes]) | |
1400 |
|
1404 | |||
1401 | def identity(x): |
|
1405 | def identity(x): | |
1402 | return x |
|
1406 | return x | |
1403 |
|
1407 | |||
1404 | def gennodelst(revlog): |
|
1408 | def gennodelst(revlog): | |
1405 | for r in xrange(0, revlog.count()): |
|
1409 | for r in xrange(0, revlog.count()): | |
1406 | n = revlog.node(r) |
|
1410 | n = revlog.node(r) | |
1407 | if revlog.linkrev(n) in revset: |
|
1411 | if revlog.linkrev(n) in revset: | |
1408 | yield n |
|
1412 | yield n | |
1409 |
|
1413 | |||
1410 | def changed_file_collector(changedfileset): |
|
1414 | def changed_file_collector(changedfileset): | |
1411 | def collect_changed_files(clnode): |
|
1415 | def collect_changed_files(clnode): | |
1412 | c = cl.read(clnode) |
|
1416 | c = cl.read(clnode) | |
1413 | for fname in c[3]: |
|
1417 | for fname in c[3]: | |
1414 | changedfileset[fname] = 1 |
|
1418 | changedfileset[fname] = 1 | |
1415 | return collect_changed_files |
|
1419 | return collect_changed_files | |
1416 |
|
1420 | |||
1417 | def lookuprevlink_func(revlog): |
|
1421 | def lookuprevlink_func(revlog): | |
1418 | def lookuprevlink(n): |
|
1422 | def lookuprevlink(n): | |
1419 | return cl.node(revlog.linkrev(n)) |
|
1423 | return cl.node(revlog.linkrev(n)) | |
1420 | return lookuprevlink |
|
1424 | return lookuprevlink | |
1421 |
|
1425 | |||
1422 | def gengroup(): |
|
1426 | def gengroup(): | |
1423 | # construct a list of all changed files |
|
1427 | # construct a list of all changed files | |
1424 | changedfiles = {} |
|
1428 | changedfiles = {} | |
1425 |
|
1429 | |||
1426 | for chnk in cl.group(nodes, identity, |
|
1430 | for chnk in cl.group(nodes, identity, | |
1427 | changed_file_collector(changedfiles)): |
|
1431 | changed_file_collector(changedfiles)): | |
1428 | yield chnk |
|
1432 | yield chnk | |
1429 | changedfiles = changedfiles.keys() |
|
1433 | changedfiles = changedfiles.keys() | |
1430 | changedfiles.sort() |
|
1434 | changedfiles.sort() | |
1431 |
|
1435 | |||
1432 | mnfst = self.manifest |
|
1436 | mnfst = self.manifest | |
1433 | nodeiter = gennodelst(mnfst) |
|
1437 | nodeiter = gennodelst(mnfst) | |
1434 | for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)): |
|
1438 | for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)): | |
1435 | yield chnk |
|
1439 | yield chnk | |
1436 |
|
1440 | |||
1437 | for fname in changedfiles: |
|
1441 | for fname in changedfiles: | |
1438 | filerevlog = self.file(fname) |
|
1442 | filerevlog = self.file(fname) | |
1439 | nodeiter = gennodelst(filerevlog) |
|
1443 | nodeiter = gennodelst(filerevlog) | |
1440 | nodeiter = list(nodeiter) |
|
1444 | nodeiter = list(nodeiter) | |
1441 | if nodeiter: |
|
1445 | if nodeiter: | |
1442 | yield changegroup.genchunk(fname) |
|
1446 | yield changegroup.genchunk(fname) | |
1443 | lookup = lookuprevlink_func(filerevlog) |
|
1447 | lookup = lookuprevlink_func(filerevlog) | |
1444 | for chnk in filerevlog.group(nodeiter, lookup): |
|
1448 | for chnk in filerevlog.group(nodeiter, lookup): | |
1445 | yield chnk |
|
1449 | yield chnk | |
1446 |
|
1450 | |||
1447 | yield changegroup.closechunk() |
|
1451 | yield changegroup.closechunk() | |
1448 |
|
1452 | |||
1449 | if nodes: |
|
1453 | if nodes: | |
1450 | self.hook('outgoing', node=hex(nodes[0]), source=source) |
|
1454 | self.hook('outgoing', node=hex(nodes[0]), source=source) | |
1451 |
|
1455 | |||
1452 | return util.chunkbuffer(gengroup()) |
|
1456 | return util.chunkbuffer(gengroup()) | |
1453 |
|
1457 | |||
    def addchangegroup(self, source):
        """add changegroup to repo.
        returns number of heads modified or added + 1."""

        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True)

        changesets = files = revisions = 0

        tr = self.transaction()

        # write changelog and manifest data to temp files so
        # concurrent readers will not see inconsistent view
        cl = appendfile.appendchangelog(self.opener, self.changelog.version)

        oldheads = len(cl.heads())

        # pull off the changeset group
        self.ui.status(_("adding changesets\n"))
        co = cl.tip()
        chunkiter = changegroup.chunkiter(source)
        cn = cl.addgroup(chunkiter, csmap, tr, 1) # unique
        cnr, cor = map(cl.rev, (cn, co))
        if cn == nullid:
            cnr = cor
        changesets = cnr - cor

        mf = appendfile.appendmanifest(self.opener, self.manifest.version)

        # pull off the manifest group
        self.ui.status(_("adding manifests\n"))
        mm = mf.tip()
        chunkiter = changegroup.chunkiter(source)
        mo = mf.addgroup(chunkiter, revmap, tr)

        # process the files
        self.ui.status(_("adding file changes\n"))
        while 1:
            f = changegroup.getchunk(source)
            if not f:
                break
            self.ui.debug(_("adding %s revisions\n") % f)
            fl = self.file(f)
            o = fl.count()
            chunkiter = changegroup.chunkiter(source)
            n = fl.addgroup(chunkiter, revmap, tr)
            revisions += fl.count() - o
            files += 1

        # write order here is important so concurrent readers will see
        # consistent view of repo
        mf.writedata()
        cl.writedata()

        # make changelog and manifest see real files again
        self.changelog = changelog.changelog(self.opener, self.changelog.version)
        self.manifest = manifest.manifest(self.opener, self.manifest.version)
        self.changelog.checkinlinesize(tr)
        self.manifest.checkinlinesize(tr)

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads > oldheads:
            heads = _(" (+%d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        self.hook('pretxnchangegroup', throw=True,
                  node=hex(self.changelog.node(cor+1)))

        tr.close()

        if changesets > 0:
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)))

            for i in range(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)))

        return newheads - oldheads + 1
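    # Added commentary (not in the original file): the "+ 1" in the return
    # value above lets callers distinguish "nothing pulled" (0, returned when
    # source is empty) from "changesets added without new heads" (1) from
    # "new heads created" (> 1).  A hedged sketch of how a pull-style caller
    # might use it:
    #
    #   modheads = repo.addchangegroup(source)
    #   if modheads > 1:
    #       ui.status(_("(run 'hg heads' to see heads, 'hg merge' to merge)\n"))
    #   elif modheads == 1:
    #       ui.status(_("(run 'hg update' to get a working copy)\n"))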
    def update(self, node, allow=False, force=False, choose=None,
               moddirstate=True, forcemerge=False, wlock=None, show_stats=True):
        pl = self.dirstate.parents()
        if not force and pl[1] != nullid:
            self.ui.warn(_("aborting: outstanding uncommitted merges\n"))
            return 1

        err = False

        p1, p2 = pl[0], node
        pa = self.changelog.ancestor(p1, p2)
        m1n = self.changelog.read(p1)[0]
        m2n = self.changelog.read(p2)[0]
        man = self.manifest.ancestor(m1n, m2n)
        m1 = self.manifest.read(m1n)
        mf1 = self.manifest.readflags(m1n)
        m2 = self.manifest.read(m2n).copy()
        mf2 = self.manifest.readflags(m2n)
        ma = self.manifest.read(man)
        mfa = self.manifest.readflags(man)

        modified, added, removed, deleted, unknown = self.changes()

        # is this a jump, or a merge? i.e. is there a linear path
        # from p1 to p2?
        linear_path = (pa == p1 or pa == p2)

        if allow and linear_path:
            raise util.Abort(_("there is nothing to merge, "
                               "just use 'hg update'"))
        if allow and not forcemerge:
            if modified or added or removed:
                raise util.Abort(_("outstanding uncommitted changes"))
        if not forcemerge and not force:
            for f in unknown:
                if f in m2:
                    t1 = self.wread(f)
                    t2 = self.file(f).read(m2[f])
                    if cmp(t1, t2) != 0:
                        raise util.Abort(_("'%s' already exists in the working"
                                           " dir and differs from remote") % f)

        # resolve the manifest to determine which files
        # we care about merging
        self.ui.note(_("resolving manifests\n"))
        self.ui.debug(_(" force %s allow %s moddirstate %s linear %s\n") %
                      (force, allow, moddirstate, linear_path))
        self.ui.debug(_(" ancestor %s local %s remote %s\n") %
                      (short(man), short(m1n), short(m2n)))

        merge = {}
        get = {}
        remove = []

        # construct a working dir manifest
        mw = m1.copy()
        mfw = mf1.copy()
        umap = dict.fromkeys(unknown)

        for f in added + modified + unknown:
            mw[f] = ""
            mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))

        if moddirstate and not wlock:
            wlock = self.wlock()

        for f in deleted + removed:
            if f in mw:
                del mw[f]

            # If we're jumping between revisions (as opposed to merging),
            # and if neither the working directory nor the target rev has
            # the file, then we need to remove it from the dirstate, to
            # prevent the dirstate from listing the file when it is no
            # longer in the manifest.
            if moddirstate and linear_path and f not in m2:
                self.dirstate.forget((f,))

        # Compare manifests
        for f, n in mw.iteritems():
            if choose and not choose(f):
                continue
            if f in m2:
                s = 0

                # is the wfile new since m1, and match m2?
                if f not in m1:
                    t1 = self.wread(f)
                    t2 = self.file(f).read(m2[f])
                    if cmp(t1, t2) == 0:
                        n = m2[f]
                    del t1, t2

                # are files different?
                if n != m2[f]:
                    a = ma.get(f, nullid)
                    # are both different from the ancestor?
                    if n != a and m2[f] != a:
                        self.ui.debug(_(" %s versions differ, resolve\n") % f)
                        # merge executable bits
                        # "if we changed or they changed, change in merge"
                        a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
                        mode = ((a^b) | (a^c)) ^ a
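                        # Added commentary (not in the original file): this
                        # xor trick keeps whichever side changed the exec bit
                        # relative to the ancestor a.  For a=0, b=1, c=0 (we
                        # set the bit, they did not):
                        #   ((0^1) | (0^0)) ^ 0 == 1  -> our change wins
                        # For a=1, b=1, c=0 (they cleared it, we left it):
                        #   ((1^1) | (1^0)) ^ 1 == 0  -> their change wins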
                        merge[f] = (m1.get(f, nullid), m2[f], mode)
                        s = 1
                    # are we clobbering?
                    # is remote's version newer?
                    # or are we going back in time?
                    elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
                        self.ui.debug(_(" remote %s is newer, get\n") % f)
                        get[f] = m2[f]
                        s = 1
                elif f in umap or f in added:
                    # this unknown file is the same as the checkout
                    # we need to reset the dirstate if the file was added
                    get[f] = m2[f]

                if not s and mfw[f] != mf2[f]:
                    if force:
                        self.ui.debug(_(" updating permissions for %s\n") % f)
                        util.set_exec(self.wjoin(f), mf2[f])
                    else:
                        a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
                        mode = ((a^b) | (a^c)) ^ a
                        if mode != b:
                            self.ui.debug(_(" updating permissions for %s\n")
                                          % f)
                            util.set_exec(self.wjoin(f), mode)
                del m2[f]
            elif f in ma:
                if n != ma[f]:
                    r = _("d")
                    if not force and (linear_path or allow):
                        r = self.ui.prompt(
                            (_(" local changed %s which remote deleted\n") % f) +
                            _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
                    if r == _("d"):
                        remove.append(f)
                else:
                    self.ui.debug(_("other deleted %s\n") % f)
                    remove.append(f) # other deleted it
            else:
                # file is created on branch or in working directory
                if force and f not in umap:
                    self.ui.debug(_("remote deleted %s, clobbering\n") % f)
                    remove.append(f)
                elif n == m1.get(f, nullid): # same as parent
                    if p2 == pa: # going backwards?
                        self.ui.debug(_("remote deleted %s\n") % f)
                        remove.append(f)
                    else:
                        self.ui.debug(_("local modified %s, keeping\n") % f)
                else:
                    self.ui.debug(_("working dir created %s, keeping\n") % f)

        for f, n in m2.iteritems():
            if choose and not choose(f):
                continue
            if f[0] == "/":
                continue
            if f in ma and n != ma[f]:
                r = _("k")
                if not force and (linear_path or allow):
                    r = self.ui.prompt(
                        (_("remote changed %s which local deleted\n") % f) +
                        _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
                if r == _("k"):
                    get[f] = n
            elif f not in ma:
                self.ui.debug(_("remote created %s\n") % f)
                get[f] = n
            else:
                if force or p2 == pa: # going backwards?
                    self.ui.debug(_("local deleted %s, recreating\n") % f)
                    get[f] = n
                else:
                    self.ui.debug(_("local deleted %s\n") % f)

        del mw, m1, m2, ma

        if force:
            for f in merge:
                get[f] = merge[f][1]
            merge = {}

        if linear_path or force:
            # we don't need to do any magic, just jump to the new rev
            branch_merge = False
            p1, p2 = p2, nullid
        else:
            if not allow:
                self.ui.status(_("this update spans a branch"
                                 " affecting the following files:\n"))
                fl = merge.keys() + get.keys()
                fl.sort()
                for f in fl:
                    cf = ""
                    if f in merge:
                        cf = _(" (resolve)")
                    self.ui.status(" %s%s\n" % (f, cf))
                self.ui.warn(_("aborting update spanning branches!\n"))
                self.ui.status(_("(use 'hg merge' to merge across branches"
                                 " or 'hg update -C' to lose changes)\n"))
                return 1
            branch_merge = True

        # get the files we don't need to change
        files = get.keys()
        files.sort()
        for f in files:
            if f[0] == "/":
                continue
            self.ui.note(_("getting %s\n") % f)
            t = self.file(f).read(get[f])
            self.wwrite(f, t)
            util.set_exec(self.wjoin(f), mf2[f])
            if moddirstate:
                if branch_merge:
                    self.dirstate.update([f], 'n', st_mtime=-1)
                else:
                    self.dirstate.update([f], 'n')

        # merge the tricky bits
        failedmerge = []
        files = merge.keys()
        files.sort()
        xp1 = hex(p1)
        xp2 = hex(p2)
        for f in files:
            self.ui.status(_("merging %s\n") % f)
            my, other, flag = merge[f]
            ret = self.merge3(f, my, other, xp1, xp2)
            if ret:
                err = True
                failedmerge.append(f)
            util.set_exec(self.wjoin(f), flag)
            if moddirstate:
                if branch_merge:
                    # We've done a branch merge, mark this file as merged
                    # so that we properly record the merger later
                    self.dirstate.update([f], 'm')
                else:
                    # We've update-merged a locally modified file, so
                    # we set the dirstate to emulate a normal checkout
                    # of that file some time in the past.  Thus our
                    # merge will appear as a normal local file
                    # modification.
                    f_len = len(self.file(f).read(other))
                    self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)

        remove.sort()
        for f in remove:
            self.ui.note(_("removing %s\n") % f)
            util.audit_path(f)
            try:
                util.unlink(self.wjoin(f))
            except OSError, inst:
                if inst.errno != errno.ENOENT:
                    self.ui.warn(_("update failed to remove %s: %s!\n") %
                                 (f, inst.strerror))
        if moddirstate:
            if branch_merge:
                self.dirstate.update(remove, 'r')
            else:
                self.dirstate.forget(remove)

        if moddirstate:
            self.dirstate.setparents(p1, p2)
        if show_stats:
            stats = ((len(get), _("updated")),
                     (len(merge) - len(failedmerge), _("merged")),
                     (len(remove), _("removed")),
                     (len(failedmerge), _("unresolved")))
            note = ", ".join([_("%d files %s") % s for s in stats])
            self.ui.status("%s\n" % note)
        if moddirstate:
            if branch_merge:
                if failedmerge:
                    self.ui.status(_("There are unresolved merges,"
                                     " you can redo the full merge using:\n"
                                     " hg update -C %s\n"
                                     " hg merge %s\n")
                                   % (self.changelog.rev(p1),
                                      self.changelog.rev(p2)))
                else:
                    self.ui.status(_("(branch merge, don't forget to commit)\n"))
            elif failedmerge:
                self.ui.status(_("There are unresolved merges with"
                                 " locally modified files.\n"))

        return err
    def merge3(self, fn, my, other, p1, p2):
        """perform a 3-way merge in the working directory"""

        def temp(prefix, node):
            pre = "%s~%s." % (os.path.basename(fn), prefix)
            (fd, name) = tempfile.mkstemp(prefix=pre)
            f = os.fdopen(fd, "wb")
            self.wwrite(fn, fl.read(node), f)
            f.close()
            return name

        fl = self.file(fn)
        base = fl.ancestor(my, other)
        a = self.wjoin(fn)
        b = temp("base", base)
        c = temp("other", other)

        self.ui.note(_("resolving %s\n") % fn)
        self.ui.debug(_("file %s: my %s other %s ancestor %s\n") %
                      (fn, short(my), short(other), short(base)))

        cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
               or "hgmerge")
        r = util.system('%s "%s" "%s" "%s"' % (cmd, a, b, c), cwd=self.root,
                        environ={'HG_FILE': fn,
                                 'HG_MY_NODE': p1,
                                 'HG_OTHER_NODE': p2,
                                 'HG_FILE_MY_NODE': hex(my),
                                 'HG_FILE_OTHER_NODE': hex(other),
                                 'HG_FILE_BASE_NODE': hex(base)})
        if r:
            self.ui.warn(_("merging %s failed!\n") % fn)

        os.unlink(b)
        os.unlink(c)
        return r
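    # Added commentary (not in the original file): with the defaults above,
    # merge3() ends up running roughly
    #
    #   hgmerge "<repo>/<fn>" "<tmp>/<fn>~base.XXXXXX" "<tmp>/<fn>~other.XXXXXX"
    #
    # with HG_FILE, HG_MY_NODE, HG_OTHER_NODE and the HG_FILE_*_NODE
    # variables in its environment, and treats a non-zero exit status as a
    # failed merge.  Setting $HGMERGE or ui.merge swaps in any other tool
    # that accepts the same local/base/other argument order.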
    def verify(self):
        filelinkrevs = {}
        filenodes = {}
        changesets = revisions = files = 0
        errors = [0]
        warnings = [0]
        neededmanifests = {}

        def err(msg):
            self.ui.warn(msg + "\n")
            errors[0] += 1

        def warn(msg):
            self.ui.warn(msg + "\n")
            warnings[0] += 1

        def checksize(obj, name):
            d = obj.checksize()
            if d[0]:
                err(_("%s data length off by %d bytes") % (name, d[0]))
            if d[1]:
                err(_("%s index contains %d extra bytes") % (name, d[1]))

        def checkversion(obj, name):
            if obj.version != revlog.REVLOGV0:
                if not revlogv1:
                    warn(_("warning: `%s' uses revlog format 1") % name)
            elif revlogv1:
                warn(_("warning: `%s' uses revlog format 0") % name)

        revlogv1 = self.revlogversion != revlog.REVLOGV0
        if self.ui.verbose or revlogv1 != self.revlogv1:
            self.ui.status(_("repository uses revlog format %d\n") %
                           (revlogv1 and 1 or 0))

        seen = {}
        self.ui.status(_("checking changesets\n"))
        checksize(self.changelog, "changelog")

        for i in range(self.changelog.count()):
            changesets += 1
            n = self.changelog.node(i)
            l = self.changelog.linkrev(n)
            if l != i:
                err(_("incorrect link (%d) for changeset revision %d") % (l, i))
            if n in seen:
                err(_("duplicate changeset at revision %d") % i)
            seen[n] = 1

            for p in self.changelog.parents(n):
                if p not in self.changelog.nodemap:
                    err(_("changeset %s has unknown parent %s") %
                        (short(n), short(p)))
            try:
                changes = self.changelog.read(n)
            except KeyboardInterrupt:
                self.ui.warn(_("interrupted"))
                raise
            except Exception, inst:
                err(_("unpacking changeset %s: %s") % (short(n), inst))
                continue

            neededmanifests[changes[0]] = n

            for f in changes[3]:
                filelinkrevs.setdefault(f, []).append(i)

        seen = {}
        self.ui.status(_("checking manifests\n"))
        checkversion(self.manifest, "manifest")
        checksize(self.manifest, "manifest")

        for i in range(self.manifest.count()):
            n = self.manifest.node(i)
            l = self.manifest.linkrev(n)

            if l < 0 or l >= self.changelog.count():
                err(_("bad manifest link (%d) at revision %d") % (l, i))

            if n in neededmanifests:
                del neededmanifests[n]

            if n in seen:
                err(_("duplicate manifest at revision %d") % i)

            seen[n] = 1

            for p in self.manifest.parents(n):
                if p not in self.manifest.nodemap:
                    err(_("manifest %s has unknown parent %s") %
                        (short(n), short(p)))

            try:
                delta = mdiff.patchtext(self.manifest.delta(n))
            except KeyboardInterrupt:
                self.ui.warn(_("interrupted"))
                raise
            except Exception, inst:
                err(_("unpacking manifest %s: %s") % (short(n), inst))
                continue

            try:
                ff = [ l.split('\0') for l in delta.splitlines() ]
                for f, fn in ff:
                    filenodes.setdefault(f, {})[bin(fn[:40])] = 1
            except (ValueError, TypeError), inst:
                err(_("broken delta in manifest %s: %s") % (short(n), inst))

        self.ui.status(_("crosschecking files in changesets and manifests\n"))

        for m, c in neededmanifests.items():
            err(_("Changeset %s refers to unknown manifest %s") %
                (short(m), short(c)))
        del neededmanifests

        for f in filenodes:
            if f not in filelinkrevs:
                err(_("file %s in manifest but not in changesets") % f)

        for f in filelinkrevs:
            if f not in filenodes:
                err(_("file %s in changeset but not in manifest") % f)

        self.ui.status(_("checking files\n"))
        ff = filenodes.keys()
        ff.sort()
        for f in ff:
            if f == "/dev/null":
                continue
            files += 1
            if not f:
                err(_("file without name in manifest %s") % short(n))
                continue
            fl = self.file(f)
            checkversion(fl, f)
            checksize(fl, f)

            nodes = {nullid: 1}
            seen = {}
            for i in range(fl.count()):
                revisions += 1
                n = fl.node(i)

                if n in seen:
                    err(_("%s: duplicate revision %d") % (f, i))
                if n not in filenodes[f]:
                    err(_("%s: %d:%s not in manifests") % (f, i, short(n)))
                else:
                    del filenodes[f][n]

                flr = fl.linkrev(n)
                if flr not in filelinkrevs.get(f, []):
                    err(_("%s:%s points to unexpected changeset %d")
                        % (f, short(n), flr))
                else:
                    filelinkrevs[f].remove(flr)

                # verify contents
                try:
                    t = fl.read(n)
                except KeyboardInterrupt:
                    self.ui.warn(_("interrupted"))
                    raise
                except Exception, inst:
                    err(_("unpacking file %s %s: %s") % (f, short(n), inst))

                # verify parents
                (p1, p2) = fl.parents(n)
                if p1 not in nodes:
                    err(_("file %s:%s unknown parent 1 %s") %
                        (f, short(n), short(p1)))
                if p2 not in nodes:
                    err(_("file %s:%s unknown parent 2 %s") %
                        (f, short(n), short(p2)))
                nodes[n] = 1

            # cross-check
            for node in filenodes[f]:
                err(_("node %s in manifests not in %s") % (hex(node), f))

        self.ui.status(_("%d files, %d changesets, %d total revisions\n") %
                       (files, changesets, revisions))

        if warnings[0]:
            self.ui.warn(_("%d warnings encountered!\n") % warnings[0])
        if errors[0]:
            self.ui.warn(_("%d integrity errors encountered!\n") % errors[0])
            return 1
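    # Added commentary (not in the original file): on a healthy repository
    # the status calls above produce output of the shape
    #
    #   checking changesets
    #   checking manifests
    #   crosschecking files in changesets and manifests
    #   checking files
    #   <files> files, <changesets> changesets, <revisions> total revisions
    #
    # and verify() falls through returning None; it returns 1 only when
    # integrity errors were found.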
# used to avoid circular references so destructors work
def aftertrans(base):
    p = base
    def a():
        util.rename(os.path.join(p, "journal"), os.path.join(p, "undo"))
        util.rename(os.path.join(p, "journal.dirstate"),
                    os.path.join(p, "undo.dirstate"))
    return a
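# Added commentary (not in the original file): the journal -> undo renames
# done by aftertrans() are what back the repository-level undo command
# ('hg undo', later 'hg rollback'): once a transaction commits, the journal
# that could have rolled it back is kept under the "undo" name instead of
# being deleted.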

@@ -1,1241 +1,1246 @@
1 | """ |
|
1 | """ | |
2 | revlog.py - storage back-end for mercurial |
|
2 | revlog.py - storage back-end for mercurial | |
3 |
|
3 | |||
4 | This provides efficient delta storage with O(1) retrieve and append |
|
4 | This provides efficient delta storage with O(1) retrieve and append | |
5 | and O(changes) merge between branches |
|
5 | and O(changes) merge between branches | |
6 |
|
6 | |||
7 | Copyright 2005 Matt Mackall <mpm@selenic.com> |
|
7 | Copyright 2005 Matt Mackall <mpm@selenic.com> | |
8 |
|
8 | |||
9 | This software may be used and distributed according to the terms |
|
9 | This software may be used and distributed according to the terms | |
10 | of the GNU General Public License, incorporated herein by reference. |
|
10 | of the GNU General Public License, incorporated herein by reference. | |
11 | """ |
|
11 | """ | |
12 |
|
12 | |||
13 | from node import * |
|
13 | from node import * | |
14 | from i18n import gettext as _ |
|
14 | from i18n import gettext as _ | |
15 | from demandload import demandload |
|
15 | from demandload import demandload | |
16 | demandload(globals(), "binascii changegroup errno heapq mdiff os") |
|
16 | demandload(globals(), "binascii changegroup errno heapq mdiff os") | |
17 | demandload(globals(), "sha struct util zlib") |
|
17 | demandload(globals(), "sha struct util zlib") | |
18 |
|
18 | |||
19 | # revlog version strings |
|
19 | # revlog version strings | |
20 | REVLOGV0 = 0 |
|
20 | REVLOGV0 = 0 | |
21 | REVLOGNG = 1 |
|
21 | REVLOGNG = 1 | |
22 |
|
22 | |||
23 | # revlog flags |
|
23 | # revlog flags | |
24 | REVLOGNGINLINEDATA = (1 << 16) |
|
24 | REVLOGNGINLINEDATA = (1 << 16) | |
|
25 | REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA | |||
|
26 | ||||
|
27 | REVLOG_DEFAULT_FORMAT = REVLOGNG | |||
|
28 | REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS | |||
25 |
|
29 | |||
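# Added commentary (not in the original file): the version field packs the
# on-disk format number into its low 16 bits and feature flags into the
# bits above, so with the defaults defined here:
#
#   REVLOG_DEFAULT_VERSION == REVLOGNG | REVLOGNGINLINEDATA == 0x10001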
def flagstr(flag):
    if flag == "inline":
        return REVLOGNGINLINEDATA
    raise RevlogError(_("unknown revlog flag %s") % flag)

def hash(text, p1, p2):
    """generate a hash from the given text and its parent hashes

    This hash combines both the current file contents and its history
    in a manner that makes it easy to distinguish nodes with the same
    content in the revision graph.
    """
    l = [p1, p2]
    l.sort()
    s = sha.new(l[0])
    s.update(l[1])
    s.update(text)
    return s.digest()
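# Added commentary (not in the original file): a node id is thus the SHA-1
# of the two parent ids in sorted order followed by the text, i.e.
#
#   hash(text, p1, p2) == sha.new(min(p1, p2) + max(p1, p2) + text).digest()
#
# Sorting makes the id independent of parent order, while mixing the
# parents in at all makes identical texts at different points in history
# hash differently.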
def compress(text):
    """ generate a possibly-compressed representation of text """
    if not text: return ("", text)
    if len(text) < 44:
        if text[0] == '\0': return ("", text)
        return ('u', text)
    bin = zlib.compress(text)
    if len(bin) > len(text):
        if text[0] == '\0': return ("", text)
        return ('u', text)
    return ("", bin)

def decompress(bin):
    """ decompress the given input """
    if not bin: return bin
    t = bin[0]
    if t == '\0': return bin
    if t == 'x': return zlib.decompress(bin)
    if t == 'u': return bin[1:]
    raise RevlogError(_("unknown compression type %r") % t)
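# Added commentary (not in the original file): compress() and decompress()
# are inverses, with the first byte of the stored form selecting a decoder:
# 'u' marks uncompressed text, '\0' (or the empty string) is passed through
# untouched, and 'x' happens to be the first byte of a zlib stream.  E.g.:
#
#   for t in ("", "short text", "x" * 1000):
#       head, body = compress(t)
#       assert decompress(head + body) == t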
indexformatv0 = ">4l20s20s20s"
v0shaoffset = 56
# index ng:
#  6 bytes: offset
#  2 bytes: flags
#  4 bytes: compressed length
#  4 bytes: uncompressed length
#  4 bytes: base rev
#  4 bytes: link rev
#  4 bytes: parent 1 rev
#  4 bytes: parent 2 rev
# 32 bytes: nodeid
indexformatng = ">Qiiiiii20s12x"
ngshaoffset = 32
versionformat = ">i"
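# Added commentary (not in the original file): struct.calcsize() gives the
# fixed entry sizes the lazy parser below depends on:
#
#   struct.calcsize(indexformatv0) == 76  # node id at byte offset 56
#   struct.calcsize(indexformatng) == 64  # node id at byte offset 32
#
# which is where v0shaoffset and ngshaoffset come from.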
class lazyparser(object):
    """
    this class avoids the need to parse the entirety of large indices
    """
    def __init__(self, dataf, size, indexformat, shaoffset):
        self.dataf = dataf
        self.format = indexformat
        self.s = struct.calcsize(indexformat)
        self.indexformat = indexformat
        self.datasize = size
        self.l = size/self.s
        self.index = [None] * self.l
        self.map = {nullid: -1}
        self.allmap = 0
        self.all = 0
        self.mapfind_count = 0
        self.shaoffset = shaoffset

    def loadmap(self):
        """
        during a commit, we need to make sure the rev being added is
        not a duplicate.  This requires loading the entire index,
        which is fairly slow.  loadmap can load up just the node map,
        which takes much less time.
        """
        if self.allmap: return
        start = 0
        end = self.datasize
        self.allmap = 1
        cur = 0
        count = 0
        blocksize = self.s * 256
        self.dataf.seek(0)
        while cur < end:
            data = self.dataf.read(blocksize)
            off = 0
            for x in xrange(256):
                n = data[off + self.shaoffset:off + self.shaoffset + 20]
                self.map[n] = count
                count += 1
                if count >= self.l:
                    break
                off += self.s
            cur += blocksize

    def loadblock(self, blockstart, blocksize, data=None):
        if self.all: return
        if data is None:
            self.dataf.seek(blockstart)
            data = self.dataf.read(blocksize)
        lend = len(data) / self.s
        i = blockstart / self.s
        off = 0
        for x in xrange(lend):
            if self.index[i + x] is None:
                b = data[off : off + self.s]
                self.index[i + x] = b
                n = b[self.shaoffset:self.shaoffset + 20]
                self.map[n] = i + x
            off += self.s

    def findnode(self, node):
        """search backwards through the index file for a specific node"""
        if self.allmap: return None

        # hg log will cause many many searches for the manifest
        # nodes.  After we get called a few times, just load the whole
        # thing.
        if self.mapfind_count > 8:
            self.loadmap()
            if node in self.map:
                return node
            return None
        self.mapfind_count += 1
        last = self.l - 1
        while self.index[last] is not None:
            if last == 0:
                self.all = 1
                self.allmap = 1
                return None
            last -= 1
        end = (last + 1) * self.s
        blocksize = self.s * 256
        while end >= 0:
            start = max(end - blocksize, 0)
            self.dataf.seek(start)
            data = self.dataf.read(end - start)
            findend = end - start
            while True:
                # we're searching backwards, so we have to make sure
                # we don't find a changeset where this node is a parent
173 | off = data.rfind(node, 0, findend) |
|
177 | off = data.rfind(node, 0, findend) | |
174 | findend = off |
|
178 | findend = off | |
175 | if off >= 0: |
|
179 | if off >= 0: | |
176 | i = off / self.s |
|
180 | i = off / self.s | |
177 | off = i * self.s |
|
181 | off = i * self.s | |
178 | n = data[off + self.shaoffset:off + self.shaoffset + 20] |
|
182 | n = data[off + self.shaoffset:off + self.shaoffset + 20] | |
179 | if n == node: |
|
183 | if n == node: | |
180 | self.map[n] = i + start / self.s |
|
184 | self.map[n] = i + start / self.s | |
181 | return node |
|
185 | return node | |
182 | else: |
|
186 | else: | |
183 | break |
|
187 | break | |
184 | end -= blocksize |
|
188 | end -= blocksize | |
185 | return None |
|
189 | return None | |
186 |
|
190 | |||
    def loadindex(self, i=None, end=None):
        if self.all: return
        all = False
        if i == None:
            blockstart = 0
            blocksize = (512 / self.s) * self.s
            end = self.datasize
            all = True
        else:
            if end:
                blockstart = i * self.s
                end = end * self.s
                blocksize = end - blockstart
            else:
                blockstart = (i & ~(32)) * self.s
                blocksize = self.s * 64
                end = blockstart + blocksize
        while blockstart < end:
            self.loadblock(blockstart, blocksize)
            blockstart += blocksize
        if all: self.all = True

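# A minimal sketch (an annotation, not part of the original file) of the
# block arithmetic loadindex() uses: requesting entry i loads a 64-entry
# block starting at a rounded-down boundary. With a record size s = 64
# and i = 100:
#
#   blockstart = (100 & ~(32)) * 64    # 100 & ~32 == 68, so 4352
#   blocksize = 64 * 64                # 4096 bytes per load
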
class lazyindex(object):
    """a lazy version of the index array"""
    def __init__(self, parser):
        self.p = parser
    def __len__(self):
        return len(self.p.index)
    def load(self, pos):
        if pos < 0:
            pos += len(self.p.index)
        self.p.loadindex(pos)
        return self.p.index[pos]
    def __getitem__(self, pos):
        ret = self.p.index[pos] or self.load(pos)
        if isinstance(ret, str):
            ret = struct.unpack(self.p.indexformat, ret)
        return ret
    def __setitem__(self, pos, item):
        self.p.index[pos] = item
    def __delitem__(self, pos):
        del self.p.index[pos]
    def append(self, e):
        self.p.index.append(e)

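# A minimal sketch of the unpack-on-first-access trick lazyindex relies
# on: raw index records stay packed strings until touched. The format
# string below is an assumption for illustration (the real indexformat
# values are defined earlier in this file):
#
#   import struct
#   RECFMT = ">4l20s20s20s"            # hypothetical v0-style record
#   raw = struct.pack(RECFMT, 0, 42, 0, 7, "a"*20, "b"*20, "c"*20)
#   struct.unpack(RECFMT, raw)[:4]     # -> (0, 42, 0, 7)
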
class lazymap(object):
    """a lazy version of the node map"""
    def __init__(self, parser):
        self.p = parser
    def load(self, key):
        n = self.p.findnode(key)
        if n == None:
            raise KeyError(key)
    def __contains__(self, key):
        if key in self.p.map:
            return True
        self.p.loadmap()
        return key in self.p.map
    def __iter__(self):
        yield nullid
        for i in xrange(self.p.l):
            ret = self.p.index[i]
            if not ret:
                self.p.loadindex(i)
                ret = self.p.index[i]
            if isinstance(ret, str):
                ret = struct.unpack(self.p.indexformat, ret)
            yield ret[-1]
    def __getitem__(self, key):
        try:
            return self.p.map[key]
        except KeyError:
            try:
                self.load(key)
                return self.p.map[key]
            except KeyError:
                raise KeyError("node " + hex(key))
    def __setitem__(self, key, val):
        self.p.map[key] = val
    def __delitem__(self, key):
        del self.p.map[key]

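# A sketch of how callers see lazymap (an annotation): it behaves like
# the plain dict built by parseindex() below, so `node in nodemap` and
# `nodemap[node]` work unchanged whether or not the index was lazily
# parsed:
#
#   if node in self.nodemap:           # may trigger loadmap()
#       r = self.nodemap[node]         # may trigger a findnode() search
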
class RevlogError(Exception): pass

class revlog(object):
    """
    the underlying revision storage object

    A revlog consists of two parts, an index and the revision data.

    The index is a file with a fixed record size containing
    information on each revision, including its nodeid (hash), the
    nodeids of its parents, the position and offset of its data within
    the data file, and the revision it's based on. Finally, each entry
    contains a linkrev entry that can serve as a pointer to external
    data.

    The revision data itself is a linear collection of data chunks.
    Each chunk represents a revision and is usually represented as a
    delta against the previous chunk. To bound lookup time, runs of
    deltas are limited to about 2 times the length of the original
    version data. This makes retrieval of a version proportional to
    its size, or O(1) relative to the number of revisions.

    Both pieces of the revlog are written to in an append-only
    fashion, which means we never need to rewrite a file to insert or
    remove data, and can use some simple techniques to avoid the need
    for locking while reading.
    """
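
    # A sketch of the retrieval cost described above (an annotation): the
    # full text is stored at base(rev) and deltas follow, so rebuilding a
    # revision is, in outline, what revision() does below:
    #
    #   text = self.chunk(self.base(rev))
    #   for r in xrange(self.base(rev) + 1, rev + 1):
    #       text = self.patches(text, [self.chunk(r)])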
    def __init__(self, opener, indexfile, datafile,
                 defversion=REVLOG_DEFAULT_VERSION):
        """
        create a revlog object

        opener is a function that abstracts the file opening operation
        and can be used to implement COW semantics or the like.
        """
        self.indexfile = indexfile
        self.datafile = datafile
        self.opener = opener

        self.indexstat = None
        self.cache = None
        self.chunkcache = None
        self.defversion = defversion
        self.load()

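    # A sketch of the opener contract (an annotation): anything that
    # behaves like open() relative to the store directory is enough for
    # reading, e.g. the hypothetical
    #
    #   def opener(path, mode='r', **kw):
    #       return open(os.path.join(store, path), mode)
    #
    # (writing additionally uses an atomictemp keyword; see
    # checkinlinesize below).
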
    def load(self):
        v = self.defversion
        try:
            f = self.opener(self.indexfile)
            i = f.read(4)
            f.seek(0)
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
            i = ""
        else:
            try:
                st = util.fstat(f)
            except AttributeError, inst:
                st = None
            else:
                oldst = self.indexstat
                if (oldst and st.st_dev == oldst.st_dev
                    and st.st_ino == oldst.st_ino
                    and st.st_mtime == oldst.st_mtime
                    and st.st_ctime == oldst.st_ctime):
                    return
                self.indexstat = st
            if len(i) > 0:
                v = struct.unpack(versionformat, i)[0]
        flags = v & ~0xFFFF
        fmt = v & 0xFFFF
        if fmt == REVLOGV0:
            if flags:
                raise RevlogError(_("index %s invalid flags %x for format v0" %
                                   (self.indexfile, flags)))
        elif fmt == REVLOGNG:
            if flags & ~REVLOGNGINLINEDATA:
                raise RevlogError(_("index %s invalid flags %x for revlogng" %
                                   (self.indexfile, flags)))
        else:
            raise RevlogError(_("index %s invalid format %d" %
                               (self.indexfile, fmt)))
        self.version = v
        if v == REVLOGV0:
            self.indexformat = indexformatv0
            shaoffset = v0shaoffset
        else:
            self.indexformat = indexformatng
            shaoffset = ngshaoffset

        if i:
            if not self.inlinedata() and st and st.st_size > 10000:
                # big index, let's parse it on demand
                parser = lazyparser(f, st.st_size, self.indexformat, shaoffset)
                self.index = lazyindex(parser)
                self.nodemap = lazymap(parser)
            else:
                i = f.read()
                self.parseindex(i)
            if self.inlinedata():
                # we've already got the entire data file read in, save it
                # in the chunk data
                self.chunkcache = (0, i)
            if self.version != REVLOGV0:
                e = list(self.index[0])
                type = self.ngtype(e[0])
                e[0] = self.offset_type(0, type)
                self.index[0] = e
        else:
            self.nodemap = { nullid: -1}
            self.index = []


    def parseindex(self, data):
        s = struct.calcsize(self.indexformat)
        l = len(data)
        self.index = []
        self.nodemap = {nullid: -1}
        inline = self.inlinedata()
        off = 0
        n = 0
        while off < l:
            e = struct.unpack(self.indexformat, data[off:off + s])
            self.index.append(e)
            self.nodemap[e[-1]] = n
            n += 1
            off += s
            if inline:
                off += e[1]

    def ngoffset(self, q):
        if q & 0xFFFF:
            raise RevlogError(_('%s: incompatible revision flag %x') %
                              (self.indexfile, q))
        return long(q >> 16)

    def ngtype(self, q):
        return int(q & 0xFFFF)

    def offset_type(self, offset, type):
        return long(long(offset) << 16 | type)

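    # A quick worked example of the packing above (an annotation):
    # revlogng keeps a 48-bit offset and a 16-bit type in one field, so
    #
    #   q = self.offset_type(4096, 0)   # 4096 << 16 == 268435456
    #   self.ngoffset(q)                # -> 4096L
    #   self.ngtype(q)                  # -> 0
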
    def loadindex(self, start, end):
        """load a block of indexes all at once from the lazy parser"""
        if isinstance(self.index, lazyindex):
            self.index.p.loadindex(start, end)

    def loadindexmap(self):
        """loads both the map and the index from the lazy parser"""
        if isinstance(self.index, lazyindex):
            p = self.index.p
            p.loadindex()
            self.nodemap = p.map

    def loadmap(self):
        """loads the map from the lazy parser"""
        if isinstance(self.nodemap, lazymap):
            self.nodemap.p.loadmap()
            self.nodemap = self.nodemap.p.map

    def inlinedata(self): return self.version & REVLOGNGINLINEDATA
    def tip(self): return self.node(len(self.index) - 1)
    def count(self): return len(self.index)
    def node(self, rev):
        return (rev < 0) and nullid or self.index[rev][-1]
    def rev(self, node):
        try:
            return self.nodemap[node]
        except KeyError:
            raise RevlogError(_('%s: no node %s') % (self.indexfile, hex(node)))
    def linkrev(self, node): return self.index[self.rev(node)][-4]
    def parents(self, node):
        if node == nullid: return (nullid, nullid)
        r = self.rev(node)
        d = self.index[r][-3:-1]
        if self.version == REVLOGV0:
            return d
        return [ self.node(x) for x in d ]
    def start(self, rev):
        if rev < 0:
            return -1
        if self.version != REVLOGV0:
            return self.ngoffset(self.index[rev][0])
        return self.index[rev][0]

    def end(self, rev): return self.start(rev) + self.length(rev)

    def size(self, rev):
        """return the length of the uncompressed text for a given revision"""
        l = -1
        if self.version != REVLOGV0:
            l = self.index[rev][2]
            if l >= 0:
                return l

        t = self.revision(self.node(rev))
        return len(t)

        # alternate implementation. The advantage of this code is that it
        # will be faster for a single revision. But the results are not
        # cached, so finding the size of every revision will be slower.
        """
        if self.cache and self.cache[1] == rev:
            return len(self.cache[2])

        base = self.base(rev)
        if self.cache and self.cache[1] >= base and self.cache[1] < rev:
            base = self.cache[1]
            text = self.cache[2]
        else:
            text = self.revision(self.node(base))

        l = len(text)
        for x in xrange(base + 1, rev + 1):
            l = mdiff.patchedsize(l, self.chunk(x))
        return l
        """

    def length(self, rev):
        if rev < 0:
            return 0
        else:
            return self.index[rev][1]
    def base(self, rev): return (rev < 0) and rev or self.index[rev][-5]

    def reachable(self, rev, stop=None):
        reachable = {}
        visit = [rev]
        reachable[rev] = 1
        if stop:
            stopn = self.rev(stop)
        else:
            stopn = 0
        while visit:
            n = visit.pop(0)
            if n == stop:
                continue
            if n == nullid:
                continue
            for p in self.parents(n):
                if self.rev(p) < stopn:
                    continue
                if p not in reachable:
                    reachable[p] = 1
                    visit.append(p)
        return reachable

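    # A small worked example (an annotation): reachable() is a
    # breadth-first walk along parent pointers. On a linear history
    # n0 <- n1 <- n2,
    #
    #   self.reachable(n2)            # -> {n2: 1, n1: 1, n0: 1}
    #   self.reachable(n2, stop=n1)   # -> {n2: 1, n1: 1}; n1's parents
    #                                 #    are not walked
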
    def nodesbetween(self, roots=None, heads=None):
        """Return a tuple containing three elements. Elements 1 and 2
        contain a final list of bases and heads after all the unreachable
        ones have been pruned. Element 0 contains a topologically sorted
        list of all nodes that satisfy these constraints:
        1. All nodes must be descended from a node in roots (the nodes on
        roots are considered descended from themselves).
        2. All nodes must also be ancestors of a node in heads (the nodes in
        heads are considered to be their own ancestors).

        If roots is unspecified, nullid is assumed as the only root.
        If heads is unspecified, it is taken to be the output of the
        heads method (i.e. a list of all nodes in the repository that
        have no children)."""
        nonodes = ([], [], [])
        if roots is not None:
            roots = list(roots)
            if not roots:
                return nonodes
            lowestrev = min([self.rev(n) for n in roots])
        else:
            roots = [nullid] # Everybody's a descendent of nullid
            lowestrev = -1
        if (lowestrev == -1) and (heads is None):
            # We want _all_ the nodes!
            return ([self.node(r) for r in xrange(0, self.count())],
                    [nullid], list(self.heads()))
        if heads is None:
            # All nodes are ancestors, so the latest ancestor is the last
            # node.
            highestrev = self.count() - 1
            # Set ancestors to None to signal that every node is an ancestor.
            ancestors = None
            # Set heads to an empty dictionary for later discovery of heads
            heads = {}
        else:
            heads = list(heads)
            if not heads:
                return nonodes
            ancestors = {}
            # Start at the top and keep marking parents until we're done.
            nodestotag = heads[:]
            # Turn heads into a dictionary so we can remove 'fake' heads.
            # Also, later we will be using it to filter out the heads we can't
            # find from roots.
            heads = dict.fromkeys(heads, 0)
            # Remember where the top was so we can use it as a limit later.
            highestrev = max([self.rev(n) for n in nodestotag])
            while nodestotag:
                # grab a node to tag
                n = nodestotag.pop()
                # Never tag nullid
                if n == nullid:
                    continue
                # A node's revision number represents its place in a
                # topologically sorted list of nodes.
                r = self.rev(n)
                if r >= lowestrev:
                    if n not in ancestors:
                        # If we are possibly a descendent of one of the roots
                        # and we haven't already been marked as an ancestor
                        ancestors[n] = 1 # Mark as ancestor
                        # Add non-nullid parents to list of nodes to tag.
                        nodestotag.extend([p for p in self.parents(n) if
                                           p != nullid])
                    elif n in heads: # We've seen it before, is it a fake head?
                        # So it is, real heads should not be the ancestors of
                        # any other heads.
                        heads.pop(n)
            if not ancestors:
                return nonodes
            # Now that we have our set of ancestors, we want to remove any
            # roots that are not ancestors.

            # If one of the roots was nullid, everything is included anyway.
            if lowestrev > -1:
                # But, since we weren't, let's recompute the lowest rev to not
                # include roots that aren't ancestors.

                # Filter out roots that aren't ancestors of heads
                roots = [n for n in roots if n in ancestors]
                # Recompute the lowest revision
                if roots:
                    lowestrev = min([self.rev(n) for n in roots])
                else:
                    # No more roots? Return empty list
                    return nonodes
            else:
                # We are descending from nullid, and don't need to care about
                # any other roots.
                lowestrev = -1
                roots = [nullid]
        # Transform our roots list into a 'set' (i.e. a dictionary where the
        # values don't matter).
        descendents = dict.fromkeys(roots, 1)
        # Also, keep the original roots so we can filter out roots that aren't
        # 'real' roots (i.e. are descended from other roots).
        roots = descendents.copy()
        # Our topologically sorted list of output nodes.
        orderedout = []
        # Don't start at nullid since we don't want nullid in our output list,
        # and if nullid shows up in descendents, empty parents will look like
        # they're descendents.
        for r in xrange(max(lowestrev, 0), highestrev + 1):
            n = self.node(r)
            isdescendent = False
            if lowestrev == -1: # Everybody is a descendent of nullid
                isdescendent = True
            elif n in descendents:
                # n is already a descendent
                isdescendent = True
                # This check only needs to be done here because all the roots
                # will start being marked as descendents before the loop.
                if n in roots:
                    # If n was a root, check if it's a 'real' root.
                    p = tuple(self.parents(n))
                    # If any of its parents are descendents, it's not a root.
                    if (p[0] in descendents) or (p[1] in descendents):
                        roots.pop(n)
            else:
                p = tuple(self.parents(n))
                # A node is a descendent if either of its parents are
                # descendents. (We seeded the dependents list with the roots
                # up there, remember?)
                if (p[0] in descendents) or (p[1] in descendents):
                    descendents[n] = 1
                    isdescendent = True
            if isdescendent and ((ancestors is None) or (n in ancestors)):
                # Only include nodes that are both descendents and ancestors.
                orderedout.append(n)
                if (ancestors is not None) and (n in heads):
                    # We're trying to figure out which heads are reachable
                    # from roots.
                    # Mark this head as having been reached
                    heads[n] = 1
                elif ancestors is None:
                    # Otherwise, we're trying to discover the heads.
                    # Assume this is a head because if it isn't, the next step
                    # will eventually remove it.
                    heads[n] = 1
                    # But, obviously its parents aren't.
                    for p in self.parents(n):
                        heads.pop(p, None)
        heads = [n for n in heads.iterkeys() if heads[n] != 0]
        roots = roots.keys()
        assert orderedout
        assert roots
        assert heads
        return (orderedout, roots, heads)

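    # A small worked example (an annotation): on a linear history
    # n0 <- n1 <- n2,
    #
    #   self.nodesbetween([n1])         # -> ([n1, n2], [n1], [n2])
    #   self.nodesbetween([n1], [n1])   # -> ([n1], [n1], [n1])
    #
    # element 0 is the topologically sorted span, elements 1 and 2 the
    # pruned bases and heads.
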
    def heads(self, start=None):
        """return the list of all nodes that have no children

        if start is specified, only heads that are descendants of
        start will be returned

        """
        if start is None:
            start = nullid
        reachable = {start: 1}
        heads = {start: 1}
        startrev = self.rev(start)

        for r in xrange(startrev + 1, self.count()):
            n = self.node(r)
            for pn in self.parents(n):
                if pn in reachable:
                    reachable[n] = 1
                    heads[n] = 1
                if pn in heads:
                    del heads[pn]
        return heads.keys()

    def children(self, node):
        """find the children of a given node"""
        c = []
        p = self.rev(node)
        for r in range(p + 1, self.count()):
            n = self.node(r)
            for pn in self.parents(n):
                if pn == node:
                    c.append(n)
                    continue
                elif pn == nullid:
                    continue
        return c

    def lookup(self, id):
        """locate a node based on revision number or subset of hex nodeid"""
        try:
            rev = int(id)
            if str(rev) != id: raise ValueError
            if rev < 0: rev = self.count() + rev
            if rev < 0 or rev >= self.count(): raise ValueError
            return self.node(rev)
        except (ValueError, OverflowError):
            c = []
            for n in self.nodemap:
                if hex(n).startswith(id):
                    c.append(n)
            if len(c) > 1: raise RevlogError(_("Ambiguous identifier"))
            if len(c) < 1: raise RevlogError(_("No match found"))
            return c[0]

        return None

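    # A sketch of the two lookup forms (an annotation): both of these
    # resolve to the same node,
    #
    #   self.lookup("0")                      # by revision number
    #   self.lookup(hex(self.node(0))[:8])    # by unambiguous hex prefix
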
    def diff(self, a, b):
        """return a delta between two revisions"""
        return mdiff.textdiff(a, b)

    def patches(self, t, pl):
        """apply a list of patches to a string"""
        return mdiff.patches(t, pl)

    def chunk(self, rev, df=None, cachelen=4096):
        start, length = self.start(rev), self.length(rev)
        inline = self.inlinedata()
        if inline:
            start += (rev + 1) * struct.calcsize(self.indexformat)
        end = start + length
        def loadcache(df):
            cache_length = max(cachelen, length) # 4k
            if not df:
                if inline:
                    df = self.opener(self.indexfile)
                else:
                    df = self.opener(self.datafile)
            df.seek(start)
            self.chunkcache = (start, df.read(cache_length))

        if not self.chunkcache:
            loadcache(df)

        cache_start = self.chunkcache[0]
        cache_end = cache_start + len(self.chunkcache[1])
        if start >= cache_start and end <= cache_end:
            # it is cached
            offset = start - cache_start
        else:
            loadcache(df)
            offset = 0

        #def checkchunk():
        #    df = self.opener(self.datafile)
        #    df.seek(start)
        #    return df.read(length)
        #assert s == checkchunk()
        return decompress(self.chunkcache[1][offset:offset + length])

    def delta(self, node):
        """return or calculate a delta between a node and its predecessor"""
        r = self.rev(node)
        return self.revdiff(r - 1, r)

    def revdiff(self, rev1, rev2):
        """return or calculate a delta between two revisions"""
        b1 = self.base(rev1)
        b2 = self.base(rev2)
        if b1 == b2 and rev1 + 1 == rev2:
            return self.chunk(rev2)
        else:
            return self.diff(self.revision(self.node(rev1)),
                             self.revision(self.node(rev2)))

    def revision(self, node):
        """return an uncompressed revision of a given node"""
        if node == nullid: return ""
        if self.cache and self.cache[0] == node: return self.cache[2]

        # look up what we need to read
        text = None
        rev = self.rev(node)
        base = self.base(rev)

        if self.inlinedata():
            # we probably have the whole chunk cached
            df = None
        else:
            df = self.opener(self.datafile)

        # do we have useful data cached?
        if self.cache and self.cache[1] >= base and self.cache[1] < rev:
            base = self.cache[1]
            text = self.cache[2]
            self.loadindex(base, rev + 1)
        else:
            self.loadindex(base, rev + 1)
            text = self.chunk(base, df=df)

        bins = []
        for r in xrange(base + 1, rev + 1):
            bins.append(self.chunk(r, df=df))

        text = self.patches(text, bins)

        p1, p2 = self.parents(node)
        if node != hash(text, p1, p2):
            raise RevlogError(_("integrity check failed on %s:%d")
                              % (self.datafile, rev))

        self.cache = (node, rev, text)
        return text

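    # A sketch of why the (node, rev, text) cache matters (an
    # annotation): a sequential walk reuses the previous full text and
    # only applies the newest deltas, instead of rebuilding each chain
    # from its base:
    #
    #   for r in xrange(self.count()):
    #       text = self.revision(self.node(r))   # patches from the cache
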
    def checkinlinesize(self, tr, fp=None):
        if not self.inlinedata():
            return
        if not fp:
            fp = self.opener(self.indexfile, 'r')
            fp.seek(0, 2)
        size = fp.tell()
        if size < 131072:
            return
        trinfo = tr.find(self.indexfile)
        if trinfo == None:
            raise RevlogError(_("%s not found in the transaction" %
                                self.indexfile))

        trindex = trinfo[2]
        dataoff = self.start(trindex)

        tr.add(self.datafile, dataoff)
        df = self.opener(self.datafile, 'w')
        calc = struct.calcsize(self.indexformat)
        for r in xrange(self.count()):
            start = self.start(r) + (r + 1) * calc
            length = self.length(r)
            fp.seek(start)
            d = fp.read(length)
            df.write(d)
        fp.close()
        df.close()
        fp = self.opener(self.indexfile, 'w', atomictemp=True)
        self.version &= ~(REVLOGNGINLINEDATA)
        if self.count():
            x = self.index[0]
            e = struct.pack(self.indexformat, *x)[4:]
            l = struct.pack(versionformat, self.version)
            fp.write(l)
            fp.write(e)

        for i in xrange(1, self.count()):
            x = self.index[i]
            e = struct.pack(self.indexformat, *x)
            fp.write(e)

        # if we don't call rename, the temp file will never replace the
        # real index
        fp.rename()

        tr.replace(self.indexfile, trindex * calc)
        self.chunkcache = None

    def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
        """add a revision to the log

        text - the revision data to add
        transaction - the transaction object used for rollback
        link - the linkrev data to add
        p1, p2 - the parent nodeids of the revision
        d - an optional precomputed delta
        """
        if text is None: text = ""
        if p1 is None: p1 = self.tip()
        if p2 is None: p2 = nullid

        node = hash(text, p1, p2)

        if node in self.nodemap:
            return node

        n = self.count()
        t = n - 1

        if n:
            base = self.base(t)
            start = self.start(base)
            end = self.end(t)
            if not d:
                prev = self.revision(self.tip())
                d = self.diff(prev, str(text))
            data = compress(d)
            l = len(data[1]) + len(data[0])
            dist = end - start + l

        # full versions are inserted when the needed deltas
        # become comparable to the uncompressed text
        if not n or dist > len(text) * 2:
            data = compress(text)
            l = len(data[1]) + len(data[0])
            base = n
        else:
            base = self.base(t)

        offset = 0
        if t >= 0:
            offset = self.end(t)

        if self.version == REVLOGV0:
            e = (offset, l, base, link, p1, p2, node)
        else:
            e = (self.offset_type(offset, 0), l, len(text),
                 base, link, self.rev(p1), self.rev(p2), node)

        self.index.append(e)
        self.nodemap[node] = n
        entry = struct.pack(self.indexformat, *e)

        if not self.inlinedata():
            transaction.add(self.datafile, offset)
            transaction.add(self.indexfile, n * len(entry))
            f = self.opener(self.datafile, "a")
            if data[0]:
                f.write(data[0])
            f.write(data[1])
            f.close()
            f = self.opener(self.indexfile, "a")
        else:
            f = self.opener(self.indexfile, "a+")
            f.seek(0, 2)
            transaction.add(self.indexfile, f.tell(), self.count() - 1)

        if len(self.index) == 1 and self.version != REVLOGV0:
            l = struct.pack(versionformat, self.version)
            f.write(l)
            entry = entry[4:]

        f.write(entry)

        if self.inlinedata():
            f.write(data[0])
            f.write(data[1])
            self.checkinlinesize(transaction, f)

        self.cache = (node, n, text)
        return node

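    # A usage sketch (an annotation, with a hypothetical transaction tr):
    #
    #   node = self.addrevision("some text", tr, linkrev)
    #   node in self.nodemap    # -> True; re-adding the same text and
    #                           #    parents returns the existing node
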
    def ancestor(self, a, b):
        """calculate the least common ancestor of nodes a and b"""

        # start with some shortcuts for the linear cases
        if a == b:
            return a
        ra = self.rev(a)
        rb = self.rev(b)
        if ra < rb:
            last = b
            first = a
        else:
            last = a
            first = b

        # reachable won't include stop in the list, so we have to use a parent
        reachable = self.reachable(last, stop=self.parents(first)[0])
        if first in reachable:
            return first

        # calculate the distance of every node from root
        dist = {nullid: 0}
        for i in xrange(self.count()):
            n = self.node(i)
            p1, p2 = self.parents(n)
            dist[n] = max(dist[p1], dist[p2]) + 1

        # traverse ancestors in order of decreasing distance from root
        def ancestors(node):
            # we store negative distances because heap returns smallest member
            h = [(-dist[node], node)]
            seen = {}
            while h:
                d, n = heapq.heappop(h)
                if n not in seen:
                    seen[n] = 1
                    yield (-d, n)
                    for p in self.parents(n):
                        heapq.heappush(h, (-dist[p], p))

        def generations(node):
            sg, s = None, {}
            for g, n in ancestors(node):
                if g != sg:
                    if sg:
                        yield sg, s
                    sg, s = g, {n: 1}
                else:
                    s[n] = 1
            yield sg, s

        x = generations(a)
        y = generations(b)
        gx = x.next()
        gy = y.next()

        # advance each ancestor list until it is closer to root than
        # the other, or they match
        while 1:
            #print "ancestor gen %s %s" % (gx[0], gy[0])
            if gx[0] == gy[0]:
                # find the intersection
                i = [n for n in gx[1] if n in gy[1]]
                if i:
                    return i[0]
                else:
                    #print "next"
                    gy = y.next()
                    gx = x.next()
            elif gx[0] < gy[0]:
                #print "next y"
                gy = y.next()
            else:
                #print "next x"
                gx = x.next()

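The generation-matching idea in ancestor() is easier to see outside the revlog machinery. Below is a minimal, self-contained sketch of the same algorithm over a plain dict of parents; the names (lca, parents, depth) and the toy DAG are illustrative, not part of the Mercurial API:

import heapq

def lca(parents, a, b):
    """Least common ancestor via the same generation-matching walk
    as revlog.ancestor(), over a plain {node: (parent, ...)} dict."""
    dist = {}
    def depth(n):
        # distance from a root; roots (nodes with no parents) are 0
        if n not in dist:
            ps = parents[n]
            dist[n] = max(depth(p) for p in ps) + 1 if ps else 0
        return dist[n]

    def ancestors(node):
        # max-heap via negated depths: visit farthest-from-root first
        h = [(-depth(node), node)]
        seen = set()
        while h:
            d, n = heapq.heappop(h)
            if n not in seen:
                seen.add(n)
                yield -d, n
                for p in parents[n]:
                    heapq.heappush(h, (-depth(p), p))

    def generations(node):
        # batch ancestors into (depth, set-of-nodes) groups
        sg, s = None, set()
        for g, n in ancestors(node):
            if g != sg:
                if sg is not None:
                    yield sg, s
                sg, s = g, set([n])
            else:
                s.add(n)
        yield sg, s

    x, y = generations(a), generations(b)
    gx, gy = next(x), next(y)
    while True:
        if gx[0] == gy[0]:
            common = gx[1] & gy[1]
            if common:
                return common.pop()   # any member of the intersection
            gx, gy = next(x), next(y)
        elif gx[0] > gy[0]:
            gx = next(x)              # x is farther from root; advance it
        else:
            gy = next(y)

# toy DAG: root -> c1 -> c2 and root -> b1; 'm' merges c2 and b1
parents = {'root': (), 'c1': ('root',), 'b1': ('root',),
           'c2': ('c1',), 'm': ('c2', 'b1')}
print(lca(parents, 'c2', 'b1'))   # -> 'root'

Matching by distance-from-root guarantees that when both walks sit at the same generation and share a node, no common ancestor can exist farther from the root, so the first intersection found is a least common ancestor.
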
    def group(self, nodelist, lookup, infocollect=None):
        """calculate a delta group

        Given a list of changeset revs, return a set of deltas and
        metadata corresponding to nodes. The first delta is
        parent(nodes[0]) -> nodes[0]; the receiver is guaranteed to
        have this parent as it has all history before these
        changesets. The parent used is always parents[0].
        """
        revs = [self.rev(n) for n in nodelist]

        # if we don't have any revisions touched by these changesets, bail
        if not revs:
            yield changegroup.closechunk()
            return

        # add the parent of the first rev
        p = self.parents(self.node(revs[0]))[0]
        revs.insert(0, self.rev(p))

        # build deltas
        for d in xrange(0, len(revs) - 1):
            a, b = revs[d], revs[d + 1]
            nb = self.node(b)

            if infocollect is not None:
                infocollect(nb)

            d = self.revdiff(a, b)
            p = self.parents(nb)
            meta = nb + p[0] + p[1] + lookup(nb)
            yield changegroup.genchunk("%s%s" % (meta, d))

        yield changegroup.closechunk()

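group() emits its output through changegroup.genchunk() and changegroup.closechunk(), whose framing is a simple length-prefix scheme: each chunk is preceded by a 4-byte big-endian length that counts the prefix itself, and a zero length terminates the group. A small sketch of that framing and its inverse; treat the exact layout as my reading of the changegroup helpers rather than a spec, and the iterchunks name as mine:

import io
import struct

def genchunk(data):
    # 4-byte big-endian length, counting the header itself, then the payload
    return struct.pack(">l", len(data) + 4) + data

def closechunk():
    # a zero-length chunk marks the end of the group
    return struct.pack(">l", 0)

def iterchunks(stream):
    # inverse: yield payloads until the terminating empty chunk
    while True:
        l = struct.unpack(">l", stream.read(4))[0]
        if l <= 4:
            return
        yield stream.read(l - 4)

buf = io.BytesIO(genchunk(b"first") + genchunk(b"second") + closechunk())
print(list(iterchunks(buf)))   # [b'first', b'second']

In group() above, each payload is the 80 bytes of metadata (node, both parents, and the lookup result) followed immediately by the delta, which is exactly what addgroup() unpacks on the receiving side.
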
    def addgroup(self, revs, linkmapper, transaction, unique=0):
        """
        add a delta group

        Given a set of deltas, add them to the revision log. The
        first delta is against its parent, which should be in our
        log; the rest are against the previous delta.
        """

        # track the base of the current delta log
        r = self.count()
        t = r - 1
        node = None

        base = prev = -1
        start = end = textlen = 0
        if r:
            end = self.end(t)

        ifh = self.opener(self.indexfile, "a+")
        ifh.seek(0, 2)
        transaction.add(self.indexfile, ifh.tell(), self.count())
        if self.inlinedata():
            dfh = None
        else:
            transaction.add(self.datafile, end)
            dfh = self.opener(self.datafile, "a")

        # loop through our set of deltas
        chain = None
        for chunk in revs:
            node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
            link = linkmapper(cs)
            if node in self.nodemap:
                # this can happen if two branches make the same change
                # if unique:
                #    raise RevlogError(_("already have %s") % hex(node[:4]))
                chain = node
                continue
            delta = chunk[80:]

            for p in (p1, p2):
                if not p in self.nodemap:
                    raise RevlogError(_("unknown parent %s") % short(p))

            if not chain:
                # retrieve the parent revision of the delta chain
                chain = p1
                if not chain in self.nodemap:
                    raise RevlogError(_("unknown base %s") % short(chain))

            # full versions are inserted when the needed deltas become
            # comparable to the uncompressed text or when the previous
            # version is not the one we have a delta against. We use
            # the size of the previous full rev as a proxy for the
            # current size.

            if chain == prev:
                tempd = compress(delta)
                cdelta = tempd[0] + tempd[1]
                textlen = mdiff.patchedsize(textlen, delta)

            if chain != prev or (end - start + len(cdelta)) > textlen * 2:
                # flush our writes here so we can read them in revision()
                if dfh:
                    dfh.flush()
                ifh.flush()
                text = self.revision(chain)
                text = self.patches(text, [delta])
                chk = self.addrevision(text, transaction, link, p1, p2)
                if chk != node:
                    raise RevlogError(_("consistency error adding group"))
                textlen = len(text)
            else:
                if self.version == REVLOGV0:
                    e = (end, len(cdelta), base, link, p1, p2, node)
                else:
                    e = (self.offset_type(end, 0), len(cdelta), textlen, base,
                         link, self.rev(p1), self.rev(p2), node)
                self.index.append(e)
                self.nodemap[node] = r
                if self.inlinedata():
                    ifh.write(struct.pack(self.indexformat, *e))
                    ifh.write(cdelta)
                    self.checkinlinesize(transaction, ifh)
                    if not self.inlinedata():
                        dfh = self.opener(self.datafile, "a")
                        ifh = self.opener(self.indexfile, "a")
                else:
                    if not dfh:
                        # addrevision switched from inline to conventional
                        # reopen the index
                        dfh = self.opener(self.datafile, "a")
                        ifh = self.opener(self.indexfile, "a")
                    dfh.write(cdelta)
                    ifh.write(struct.pack(self.indexformat, *e))

            t, r, chain, prev = r, r + 1, node, node
            base = self.base(t)
            start = self.start(base)
            end = self.end(t)

        if node is None:
            raise RevlogError(_("group to be added is empty"))
        return node

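Two details of addgroup() are worth isolating: the fixed 80-byte chunk header (four 20-byte SHA-1 node ids: the new node, its two parents, and the changeset it links to), and the snapshot heuristic that bounds delta chains. A short sketch of both; the helper names are mine, not Mercurial's:

import struct

def parsechunk(chunk):
    # the first 80 bytes are four 20-byte node ids; the rest is the delta
    node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
    return node, p1, p2, cs, chunk[80:]

def needs_snapshot(chainsize, deltalen, textlen):
    # store a full revision once the accumulated compressed chain plus the
    # new delta would exceed twice the estimated uncompressed text size;
    # this is the (end - start + len(cdelta)) > textlen * 2 test above
    return chainsize + deltalen > textlen * 2

# e.g. a 100-byte chain and a 30-byte delta against a 60-byte text:
print(needs_snapshot(100, 30, 60))   # True -> insert a full version

The 2x bound caps the work revision() must do: reconstructing any revision never reads more than about twice the text's own size in deltas before hitting a full snapshot.
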
    def strip(self, rev, minlink):
        if self.count() == 0 or rev >= self.count():
            return

        if isinstance(self.index, lazyindex):
            self.loadindexmap()

        # When stripping away a revision, we need to make sure it
        # does not actually belong to an older changeset.
        # The minlink parameter defines the oldest revision
        # we're allowed to strip away.
        while minlink > self.index[rev][-4]:
            rev += 1
            if rev >= self.count():
                return

        # first truncate the files on disk
        end = self.start(rev)
        if not self.inlinedata():
            df = self.opener(self.datafile, "a")
            df.truncate(end)
            end = rev * struct.calcsize(self.indexformat)
        else:
            end += rev * struct.calcsize(self.indexformat)

        indexf = self.opener(self.indexfile, "a")
        indexf.truncate(end)

        # then reset internal state in memory to forget those revisions
        self.cache = None
        self.chunkcache = None
        for x in xrange(rev, self.count()):
            del self.nodemap[self.node(x)]

        del self.index[rev:]

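The truncation offsets in strip() follow directly from the on-disk layout. With separate files, index entries are fixed-size, so the index is cut at rev * entrysize and the data file at start(rev); with inline data, deltas are interleaved between entries in the .i file, so both terms are added. A small illustration, using the v1 index format string for concreteness (the strip_offsets name is mine):

import struct

def strip_offsets(rev, indexformat, datastart, inline):
    # datastart plays the role of self.start(rev): the total number of
    # data bytes belonging to revisions before rev
    entrysize = struct.calcsize(indexformat)
    if inline:
        # index entries and data share the .i file
        return {'index': datastart + rev * entrysize}
    return {'index': rev * entrysize, 'data': datastart}

# e.g. stripping from rev 10 of a non-inline revlog with 64-byte entries:
print(strip_offsets(10, ">Qiiiiii20s12x", 4096, False))
# -> {'index': 640, 'data': 4096}
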
    def checksize(self):
        expected = 0
        if self.count():
            expected = self.end(self.count() - 1)

        try:
            f = self.opener(self.datafile)
            f.seek(0, 2)
            actual = f.tell()
            dd = actual - expected
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
            dd = 0

        try:
            f = self.opener(self.indexfile)
            f.seek(0, 2)
            actual = f.tell()
            s = struct.calcsize(self.indexformat)
            i = actual / s
            di = actual - (i * s)
            if self.inlinedata():
                databytes = 0
                for r in xrange(self.count()):
                    databytes += self.length(r)
                dd = 0
                di = actual - self.count() * s - databytes
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
            di = 0

        return (dd, di)

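checksize() is what lets verification code report damaged revlog files: dd is the surplus (or, if negative, missing) byte count in the data file, and di the trailing bytes in the index that don't form whole entries. A standalone analogue of the same arithmetic over plain files, omitting the inline branch for brevity (function and parameter names are illustrative):

import errno
import os
import struct

def checksize(datafile, indexfile, indexformat, expected):
    # expected: offset just past the last revision, i.e. self.end(count - 1)
    s = struct.calcsize(indexformat)
    try:
        dd = os.path.getsize(datafile) - expected
    except OSError as inst:
        if inst.errno != errno.ENOENT:
            raise
        dd = 0   # no data file at all (e.g. an inline revlog)
    try:
        actual = os.path.getsize(indexfile)
        di = actual - (actual // s) * s   # bytes beyond the last whole entry
    except OSError as inst:
        if inst.errno != errno.ENOENT:
            raise
        di = 0
    return dd, di

A non-zero value in either slot usually indicates an interrupted write, which is why it pairs naturally with transaction rollback and recovery.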